/* Data References Analysis and Manipulation Utilities for Vectorization.
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
#include "tm.h"
#include "ggc.h"
#include "tree.h"
+#include "tm_p.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
/* Need to include rtl.h, expr.h, etc. for optabs. */
#include "expr.h"
#include "optabs.h"
+/* Return true if load- or store-lanes optab OPTAB is implemented for
+ COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
+
+static bool
+vect_lanes_optab_supported_p (const char *name, convert_optab optab,
+ tree vectype, unsigned HOST_WIDE_INT count)
+{
+ enum machine_mode mode, array_mode;
+ bool limit_p;
+
+ mode = TYPE_MODE (vectype);
+ limit_p = !targetm.array_mode_supported_p (mode, count);
+ array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
+ MODE_INT, limit_p);
+
+ if (array_mode == BLKmode)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
+ GET_MODE_NAME (mode), count);
+ return false;
+ }
+
+ if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "cannot use %s<%s><%s>",
+ name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
+ return false;
+ }
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "can use %s<%s><%s>",
+ name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
+
+ return true;
+}
+
+
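(Usage illustration, not part of the hunk above: the vectorizer is expected to wrap this helper once per optab. A minimal sketch, assuming the vec_load_lanes_optab convert optab and a wrapper name taken from context:)

bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  /* True if COUNT vectors of type VECTYPE can be loaded together
     with a single load-lanes instruction.  */
  return vect_lanes_optab_supported_p ("vec_load_lanes",
                                       vec_load_lanes_optab,
                                       vectype, count);
}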
/* Return the smallest scalar part of STMT.
- This is used to determine the vectype of the stmt. We generally set the
- vectype according to the type of the result (lhs). For stmts whose
+ This is used to determine the vectype of the stmt. We generally set the
+ vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
promotion), vectype will be reset appropriately (later). Note that we have
to visit the smallest datatype in this function, because that determines the
- VF. If the smallest datatype in the loop is present only as the rhs of a
+ VF. If the smallest datatype in the loop is present only as the rhs of a
promotion operation - we'd miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
'int_x = (int) short_inv', which we'd expect to have been optimized away by
- invariant motion. However, we cannot rely on invariant motion to always take
- invariants out of the loop, and so in the case of promotion we also have to
- check the rhs.
+ invariant motion. However, we cannot rely on invariant motion to always
+ take invariants out of the loop, and so in the case of promotion we also
+ have to check the rhs.
LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
types. */
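(The diff elides the unchanged body of vect_get_smallest_scalar_type; the following is a reconstruction sketch for context, assuming the standard GIMPLE accessors of this era:)

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  /* For casts and widening operations the rhs may be of a smaller type
     than the lhs, so the rhs type has to be visited as well.  */
  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}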
/* Find the place of the data-ref in STMT in the interleaving chain that starts
- from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
+ from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
gimple next_stmt = first_stmt;
int result = 0;
- if (first_stmt != DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
+ if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
return -1;
while (next_stmt && next_stmt != stmt)
{
result++;
- next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
if (next_stmt)
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
- prev = DR_GROUP_FIRST_DR (stmtinfo_b);
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
while (next)
{
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
{
/* Insert here. */
- DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
- DR_GROUP_NEXT_DR (stmtinfo_a) = next;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
+ GROUP_NEXT_ELEMENT (stmtinfo_a) = next;
return;
}
prev = next;
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
}
/* We got to the end of the list. Insert here. */
- DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
- DR_GROUP_NEXT_DR (stmtinfo_a) = NULL;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
+ GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL;
}
/* Function vect_update_interleaving_chain.
For two data-refs DRA and DRB that are a part of a chain interleaved data
- accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
+ accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
There are four possible cases:
1. New stmts - both DRA and DRB are not a part of any chain:
gimple node, prev, next, first_stmt;
/* 1. New stmts - both DRA and DRB are not a part of any chain. */
- if (!DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
+ if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
{
- DR_GROUP_FIRST_DR (stmtinfo_a) = DR_STMT (drb);
- DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
- DR_GROUP_NEXT_DR (stmtinfo_b) = DR_STMT (dra);
+ GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb);
+ GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
+ GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra);
return;
}
/* 2. DRB is a part of a chain and DRA is not. */
- if (!DR_GROUP_FIRST_DR (stmtinfo_a) && DR_GROUP_FIRST_DR (stmtinfo_b))
+ if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b))
{
- DR_GROUP_FIRST_DR (stmtinfo_a) = DR_GROUP_FIRST_DR (stmtinfo_b);
+ GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b);
/* Insert DRA into the chain of DRB. */
vect_insert_into_interleaving_chain (dra, drb);
return;
}
/* 3. DRA is a part of a chain and DRB is not. */
- if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
+ if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
{
- gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a);
+ gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a);
tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
old_first_stmt)));
gimple tmp;
if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
{
/* DRB's init is smaller than the init of the stmt previously marked
- as the first stmt of the interleaving chain of DRA. Therefore, we
+ as the first stmt of the interleaving chain of DRA. Therefore, we
update FIRST_STMT and put DRB in the head of the list. */
- DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
- DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt;
+ GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
+ GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt;
/* Update all the stmts in the list to point to the new FIRST_STMT. */
tmp = old_first_stmt;
while (tmp)
{
- DR_GROUP_FIRST_DR (vinfo_for_stmt (tmp)) = DR_STMT (drb);
- tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (tmp));
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb);
+ tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp));
}
}
else
{
/* Insert DRB in the list of DRA. */
vect_insert_into_interleaving_chain (drb, dra);
- DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
+ GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a);
}
return;
}
/* 4. Both DRA and DRB are in some interleaving chains. */
- first_a = DR_GROUP_FIRST_DR (stmtinfo_a);
- first_b = DR_GROUP_FIRST_DR (stmtinfo_b);
+ first_a = GROUP_FIRST_ELEMENT (stmtinfo_a);
+ first_b = GROUP_FIRST_ELEMENT (stmtinfo_b);
if (first_a == first_b)
return;
init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
/* Insert the nodes of DRA chain into the DRB chain.
After inserting a node, continue from this node of the DRB chain (don't
start from the beginning). */
- node = DR_GROUP_FIRST_DR (stmtinfo_a);
- prev = DR_GROUP_FIRST_DR (stmtinfo_b);
+ node = GROUP_FIRST_ELEMENT (stmtinfo_a);
+ prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
first_stmt = first_b;
}
else
/* Insert the nodes of DRB chain into the DRA chain.
After inserting a node, continue from this node of the DRA chain (don't
start from the beginning). */
- node = DR_GROUP_FIRST_DR (stmtinfo_b);
- prev = DR_GROUP_FIRST_DR (stmtinfo_a);
+ node = GROUP_FIRST_ELEMENT (stmtinfo_b);
+ prev = GROUP_FIRST_ELEMENT (stmtinfo_a);
first_stmt = first_a;
}
while (node)
{
node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
while (next)
{
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (next_init, node_init) > 0)
{
/* Insert here. */
- DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
- DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = next;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next;
prev = node;
break;
}
prev = next;
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
}
if (!next)
{
/* We got to the end of the list. Insert here. */
- DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
- DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
+ GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL;
prev = node;
}
- DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt;
- node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt;
+ node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node));
}
}
-
-/* Function vect_equal_offsets.
-
- Check if OFFSET1 and OFFSET2 are identical expressions. */
+/* Check dependence between DRA and DRB for basic block vectorization.
+ If the accesses share the same bases and offsets, we can compare their initial
+ constant offsets to decide whether they differ or not. In case of a read-
+ write dependence we check that the load is before the store to ensure that
+ vectorization will not change the order of the accesses. */
static bool
-vect_equal_offsets (tree offset1, tree offset2)
+vect_drs_dependent_in_basic_block (struct data_reference *dra,
+ struct data_reference *drb)
{
- bool res;
+ HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
+ gimple earlier_stmt;
- STRIP_NOPS (offset1);
- STRIP_NOPS (offset2);
+ /* We only call this function for pairs of loads and stores, but we verify
+ it here. */
+ if (DR_IS_READ (dra) == DR_IS_READ (drb))
+ {
+ if (DR_IS_READ (dra))
+ return false;
+ else
+ return true;
+ }
- if (offset1 == offset2)
+ /* Check that the data-refs have the same bases and offsets. If not, we can't
+ determine if they are dependent. */
+ if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
+ || !dr_equal_offsets_p (dra, drb))
return true;
- if (TREE_CODE (offset1) != TREE_CODE (offset2)
- || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
- return false;
+ /* Check the types. */
+ type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
+ type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
- res = vect_equal_offsets (TREE_OPERAND (offset1, 0),
- TREE_OPERAND (offset2, 0));
+ if (type_size_a != type_size_b
+ || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
+ TREE_TYPE (DR_REF (drb))))
+ return true;
+
+ init_a = TREE_INT_CST_LOW (DR_INIT (dra));
+ init_b = TREE_INT_CST_LOW (DR_INIT (drb));
- if (!res || !BINARY_CLASS_P (offset1))
- return res;
+ /* Two different locations - no dependence. */
+ if (init_a != init_b)
+ return false;
- res = vect_equal_offsets (TREE_OPERAND (offset1, 1),
- TREE_OPERAND (offset2, 1));
+ /* We have a read-write dependence. Check that the load is before the store.
+ When we vectorize basic blocks, a vector load can only be before the
+ corresponding scalar load, and a vector store can only be after its
+ corresponding scalar store. So the order of the accesses is preserved in
+ case the load is before the store. */
+ earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
+ if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
+ return false;
- return res;
+ return true;
}
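(A worked illustration, not from the patch: in a basic block containing

   x = a[4];   /* scalar load */
   a[4] = y;   /* scalar store to the same location */

the two refs share base, offset, type size, and init, so control reaches the final check; the earlier statement is the load, the function returns false, and SLP may keep the vector load ahead of the vector store. With the store first, it returns true and the dependence blocks vectorization.)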
/* Function vect_check_interleaving.
- Check if DRA and DRB are a part of interleaving. In case they are, insert
+ Check if DRA and DRB are a part of interleaving. In case they are, insert
DRA and DRB in an interleaving chain. */
static bool
/* Check that the data-refs have the same first location (except init) and
that they are both either stores or loads (not a load and a store). */
- if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
- && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
- || TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
- || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
- != TREE_OPERAND (DR_BASE_ADDRESS (drb),0)))
- || !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb))
+ if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
+ || !dr_equal_offsets_p (dra, drb)
|| !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
|| DR_IS_READ (dra) != DR_IS_READ (drb))
return false;
gimple stmt_j = DR_STMT (dr_j);
if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
- || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
- && DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j))
- && (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
- == DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j)))))
+ || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
+ && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
+ && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
+ == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
return true;
else
return false;
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
+ gimple earlier_stmt;
+
if (loop_vinfo)
{
if (vect_print_dump_info (REPORT_DR_DETAILS))
if (vect_check_interleaving (dra, drb))
return false;
+ /* Read-read is OK (we need this check here, after checking for
+ interleaving). */
+ if (DR_IS_READ (dra) && DR_IS_READ (drb))
+ return false;
+
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "can't determine dependence between ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
- /* Mark the statements as unvectorizable. */
- STMT_VINFO_VECTORIZABLE (stmtinfo_a) = false;
- STMT_VINFO_VECTORIZABLE (stmtinfo_b) = false;
+ /* We do not vectorize basic blocks with write-write dependencies. */
+ if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
+ return true;
+
+ /* Check that it's not a load-after-store dependence. */
+ earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
+ if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
+ return true;
return false;
}
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
- return true;
+ /* Do not vectorize basic blocks with write-write dependences. */
+ if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
+ return true;
+
+ /* Check if this dependence is allowed in basic block vectorization. */
+ return vect_drs_dependent_in_basic_block (dra, drb);
}
/* Loop-based vectorization and known data dependence. */
}
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
- for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
+ FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
/* For interleaving, mark that there is a read-write dependency if
necessary. We have already checked that one of the data-refs is a store. */
if (DR_IS_READ (dra))
- DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
+ GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
else
{
if (DR_IS_READ (drb))
- DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
+ GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
}
continue;
else
ddrs = BB_VINFO_DDRS (bb_vinfo);
- for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
+ FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
return false;
/* In case the dataref is in an inner-loop of the loop that is being
vectorized (LOOP), we use the base and misalignment information
- relative to the outer-loop (LOOP). This is ok only if the misalignment
+ relative to the outer-loop (LOOP). This is ok only if the misalignment
stays the same throughout the execution of the inner-loop, which is why
we have to check that the stride of the dataref in the inner-loop is
evenly divisible by the vector size. */
|| (TREE_CODE (base_addr) == SSA_NAME
&& tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
TREE_TYPE (base_addr)))),
- alignment) >= 0))
+ alignment) >= 0)
+ || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
base_aligned = true;
else
base_aligned = false;
|| (TREE_CODE (base) == VAR_DECL
&& DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
+ /* If this is a backward running DR then the first access in the larger
+ vectype actually is N-1 elements before the address in the DR.
+ Adjust misalign accordingly. */
+ if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
+ {
+ tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+ /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
+ otherwise we wouldn't be here. */
+ offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
+ /* PLUS because DR_STEP was negative. */
+ misalign = size_binop (PLUS_EXPR, misalign, offset);
+ }
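+ /* Illustration (hypothetical numbers): for a V4SI vectype over int
+ elements with DR_STEP == -4, OFFSET above is 3 * -4 == -12, so 12
+ bytes are subtracted from MISALIGN before the modulo below. */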
+
/* Modulo alignment. */
misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_compute_data_ref_alignment (dr))
{
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
- dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info)));
+ dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
- dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);
+ dr_peel_size *= GROUP_SIZE (peel_stmt_info);
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
- for (i = 0; VEC_iterate (dr_p, same_align_drs, i, current_dr); i++)
+ FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
{
if (current_dr != dr)
continue;
if (known_alignment_for_access_p (dr)
&& known_alignment_for_access_p (dr_peel))
{
+ bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
int misal = DR_MISALIGNMENT (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- misal += npeel * dr_size;
- misal %= GET_MODE_SIZE (TYPE_MODE (vectype));
+ misal += negative ? -npeel * dr_size : npeel * dr_size;
+ misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
SET_DR_MISALIGNMENT (dr, misal);
return;
}
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access matters.
Skip statements marked as not vectorizable. */
if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_DR (stmt_info) != stmt)
+ && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
|| !STMT_VINFO_VECTORIZABLE (stmt_info))
continue;
elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
- if ((nelements - mis_in_elements) % DR_GROUP_SIZE (stmt_info))
+ if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
return false;
}
if (!known_alignment_for_access_p (dr))
{
tree type = (TREE_TYPE (DR_REF (dr)));
- tree ba = DR_BASE_OBJECT (dr);
- bool is_packed = false;
+ bool is_packed = contains_packed_reference (DR_REF (dr));
- if (ba)
- is_packed = contains_packed_reference (ba);
+ if (compare_tree_int (TYPE_SIZE (type), TYPE_ALIGN (type)) > 0)
+ is_packed = true;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed);
vect_peel_info elem = (vect_peel_info) *slot;
vect_peel_extended_info max = (vect_peel_extended_info) data;
- if (elem->count > max->peel_info.count)
+ if (elem->count > max->peel_info.count
+ || (elem->count == max->peel_info.count
+ && max->peel_info.npeel > elem->npeel))
{
max->peel_info.npeel = elem->npeel;
max->peel_info.count = elem->count;
}
-/* Traverse peeling hash table and calculate cost for each peeling option. Find
- one with the lowest cost. */
+/* Traverse peeling hash table and calculate cost for each peeling option.
+ Find the one with the lowest cost. */
static int
vect_peeling_hash_get_lowest_cost (void **slot, void *data)
VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_DR (stmt_info) != stmt)
+ && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
save_misalignment = DR_MISALIGNMENT (dr);
}
outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, &dummy,
- vect_get_single_scalar_iteraion_cost (loop_vinfo));
+ vect_get_single_scalar_iteration_cost (loop_vinfo));
if (inside_cost < min->inside_cost
|| (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
the alignment of data references in the loop.
FOR NOW: we assume that whatever versioning/peeling takes place, only the
- original loop is to be vectorized; Any other loops that are created by
+ original loop is to be vectorized. Any other loops that are created by
the transformations performed in this pass - are not supposed to be
- vectorized. This restriction will be relaxed.
+ vectorized. This restriction will be relaxed.
This pass will require a cost model to guide it whether to apply peeling
- or versioning or a combination of the two. For example, the scheme that
+ or versioning or a combination of the two. For example, the scheme that
Intel uses when given a loop with several memory accesses is as follows:
choose one memory access ('p') whose alignment you want to force by doing
- peeling. Then, either (1) generate a loop in which 'p' is aligned and all
+ peeling. Then, either (1) generate a loop in which 'p' is aligned and all
other accesses are not necessarily aligned, or (2) use loop versioning to
generate one loop in which all accesses are aligned, and another loop in
which only 'p' is necessarily aligned.
Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
- Devising a cost model is the most critical aspect of this work. It will
+ Devising a cost model is the most critical aspect of this work. It will
guide us on which access to peel for, whether to use loop versioning, how
- many versions to create, etc. The cost model will probably consist of
+ many versions to create, etc. The cost model will probably consist of
generic considerations as well as target specific considerations (on
powerpc for example, misaligned stores are more painful than misaligned
loads).
}
}
- These loops are later passed to loop_transform to be vectorized. The
+ These loops are later passed to loop_transform to be vectorized. The
vectorizer will use the alignment information to guide the transformation
(whether to generate regular loads/stores, or with special handling for
misalignment). */
- The cost of peeling (the extra runtime checks, the increase
in code size). */
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
+ if (!STMT_VINFO_RELEVANT (stmt_info))
+ continue;
+
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_DR (stmt_info) != stmt)
+ && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
+ /* For invariant accesses there is nothing to enhance. */
+ if (integer_zerop (DR_STEP (dr)))
+ continue;
+
supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
do_peeling = vector_alignment_reachable_p (dr);
if (do_peeling)
if (known_alignment_for_access_p (dr))
{
unsigned int npeel_tmp;
+ bool negative = tree_int_cst_compare (DR_STEP (dr),
+ size_zero_node) < 0;
/* Save info about DR in the hash table. */
if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
nelements = TYPE_VECTOR_SUBPARTS (vectype);
mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
TREE_TYPE (DR_REF (dr))));
- npeel_tmp = (nelements - mis) % vf;
+ npeel_tmp = (negative
+ ? (mis - nelements) : (nelements - mis))
+ & (nelements - 1);
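+ /* Illustration (hypothetical numbers): with nelements == 4 and
+ mis == 1, a forward access needs (4 - 1) & 3 == 3 peeled scalar
+ iterations, a backward one (1 - 4) & 3 == 1. */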
/* For multiple types, it is possible that the bigger type access
- will have more than one peeling option. E.g., a loop with two
+ will have more than one peeling option. E.g., a loop with two
types: one of size (vector size / 4), and the other one of
- size (vector size / 8). Vectorization factor will 8. If both
+ size (vector size / 8). Vectorization factor will be 8. If both
accesses are misaligned by 3, the first one needs one scalar
- iteration to be aligned, and the second one needs 5. But the
+ iteration to be aligned, and the second one needs 5. But
the first one will be aligned also by peeling 5 scalar
iterations, and in that case both accesses will be aligned.
Hence, except for the immediate peeling amount, we also want
dr0 = dr;
}
- if (!first_store && !DR_IS_READ (dr))
+ if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
if (!supportable_dr_alignment)
{
dr0 = dr;
- if (!first_store && !DR_IS_READ (dr))
+ if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
}
if (known_alignment_for_access_p (dr0))
{
+ bool negative = tree_int_cst_compare (DR_STEP (dr0),
+ size_zero_node) < 0;
if (!npeel)
{
/* Since it's known at compile time, compute the number of
count. */
mis = DR_MISALIGNMENT (dr0);
mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
- npeel = nelements - mis;
+ npeel = ((negative ? mis - nelements : nelements - mis)
+ & (nelements - 1));
}
/* For interleaved data access every iteration accesses all the
by the group size. */
stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
- npeel /= DR_GROUP_SIZE (stmt_info);
+ npeel /= GROUP_SIZE (stmt_info);
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Try peeling by %d", npeel);
}
/* Ensure that all data refs can be vectorized after the peel. */
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
int save_misalignment;
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_DR (stmt_info) != stmt)
+ && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
save_misalignment = DR_MISALIGNMENT (dr);
by the peeling factor times the element size of DR_i (MOD the
vectorization factor times the size). Otherwise, the
misalignment of DR_i must be set to unknown. */
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
if (dr != dr0)
vect_update_misalignment_for_peel (dr, dr0, npeel);
if (do_versioning)
{
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
matters. */
if (aligned_access_p (dr)
|| (STMT_VINFO_STRIDED_ACCESS (stmt_info)
- && DR_GROUP_FIRST_DR (stmt_info) != stmt))
+ && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
/* It can now be assumed that the data references in the statements
in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
of the loop being vectorized. */
- for (i = 0; VEC_iterate (gimple, may_misalign_stmts, i, stmt); i++)
+ FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
if (DDR_NUM_DIST_VECTS (ddr) == 0)
return;
+ /* Data-dependence analysis reports a distance vector of zero
+ for data-references that overlap only in the first iteration
+ but whose steps have different signs (see PR45764).
+ So as a sanity check require equal DR_STEP. */
+ if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
+ return;
+
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
- for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
+ FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
struct data_dependence_relation *ddr;
unsigned int i;
- for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
+ FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
vect_find_same_alignment_drs (ddr, loop_vinfo);
}
/* Analyze groups of strided accesses: check that DR belongs to a group of
- strided accesses of legal size, step, etc. Detect gaps, single element
+ strided accesses of legal size, step, etc. Detect gaps, single element
interleaving, and other special cases. Set strided access info.
Collect groups of strided stores for further use in SLP analysis. */
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
- HOST_WIDE_INT stride;
+ HOST_WIDE_INT stride, last_accessed_element = 1;
bool slp_impossible = false;
+ struct loop *loop = NULL;
+
+ if (loop_vinfo)
+ loop = LOOP_VINFO_LOOP (loop_vinfo);
/* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
interleaving group (including gaps). */
stride = dr_step / type_size;
/* A non-consecutive access is possible only if it is a part of interleaving. */
- if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
+ if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* Check if this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
&& stride > 0
&& exact_log2 (stride) != -1)
{
- DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = stmt;
- DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected single element interleaving ");
fprintf (vect_dump, " step ");
print_generic_expr (vect_dump, step, TDF_SLIM);
}
+
+ if (loop_vinfo)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Data access with gaps requires scalar "
+ "epilogue loop");
+ if (loop->inner)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Peeling for outer loop is not"
+ " supported");
+ return false;
+ }
+
+ LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
+ }
+
return true;
}
return false;
}
- if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt)
+ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
{
/* First stmt in the interleaving chain. Check the chain. */
- gimple next = DR_GROUP_NEXT_DR (vinfo_for_stmt (stmt));
+ gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree next_step;
while (next)
{
- /* Skip same data-refs. In case that two or more stmts share data-ref
- (supported only for loads), we vectorize only the first stmt, and
- the rest get their vectorized loads from the first one. */
+ /* Skip same data-refs. In case that two or more stmts share
+ data-ref (supported only for loads), we vectorize only the first
+ stmt, and the rest get their vectorized loads from the first
+ one. */
if (!tree_int_cst_compare (DR_INIT (data_ref),
DR_INIT (STMT_VINFO_DATA_REF (
vinfo_for_stmt (next)))))
{
- if (!DR_IS_READ (data_ref))
+ if (DR_IS_WRITE (data_ref))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Two store stmts share the same dr.");
/* Check that there are no load-store dependencies for these loads,
to prevent a case of load-store-load to the same location. */
- if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
- || DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
+ if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
+ || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump,
}
/* For load use the same data-ref load. */
- DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
+ GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
prev = next;
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
continue;
}
+
prev = next;
/* Check that all the accesses have the same STEP. */
{
/* FORNOW: SLP of accesses with gaps is not supported. */
slp_impossible = true;
- if (!DR_IS_READ (data_ref))
+ if (DR_IS_WRITE (data_ref))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "interleaved store with gaps");
gaps += diff - 1;
}
+ last_accessed_element += diff;
+
/* Store the gap from the previous member of the group. If there is no
- gap in the access, DR_GROUP_GAP is always 1. */
- DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;
+ gap in the access, GROUP_GAP is always 1. */
+ GROUP_GAP (vinfo_for_stmt (next)) = diff;
prev_init = DR_INIT (data_ref);
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
+ next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
/* Count the number of data-refs in the chain. */
count++;
}
/* There is a gap after the last load in the group. This gap is the
difference between the stride and the number of elements. When
there is no gap, this difference should be 0. */
- DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
+ GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
}
else
{
return false;
}
- /* FORNOW: we handle only interleaving that is a power of 2.
- We don't fail here if it may be still possible to vectorize the
- group using SLP. If not, the size of the group will be checked in
- vect_analyze_operations, and the vectorization will fail. */
- if (exact_log2 (stride) == -1)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "interleaving is not a power of 2");
-
- if (slp_impossible)
- return false;
- }
-
if (stride == 0)
stride = count;
- DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyze_slp. */
- if (!DR_IS_READ (dr) && !slp_impossible)
+ if (DR_IS_WRITE (dr) && !slp_impossible)
{
if (loop_vinfo)
VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
stmt);
}
+
+ /* There is a gap at the end of the group. */
+ if (stride - last_accessed_element > 0 && loop_vinfo)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Data access with gaps requires scalar "
+ "epilogue loop");
+ if (loop->inner)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Peeling for outer loop is not supported");
+ return false;
+ }
+
+ LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
+ }
}
return true;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
- HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
+ HOST_WIDE_INT dr_step;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
return false;
}
- /* Don't allow invariant accesses in loops. */
+ /* Allow invariant loads in loops. */
+ dr_step = TREE_INT_CST_LOW (step);
if (loop_vinfo && dr_step == 0)
- return false;
+ {
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+ return DR_IS_READ (dr);
+ }
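+ /* Illustration: in "for (i = 0; i < n; i++) a[i] = *p;" the load *p
+ has zero step; it can be hoisted and splatted into a vector, so
+ invariant loads are allowed while invariant stores are rejected. */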
if (loop && nested_in_vect_loop_p (loop, stmt))
{
/* Interleaved accesses are not yet supported within outer-loop
vectorization for references in the inner-loop. */
- DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
}
/* Consecutive? */
- if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)))
+ if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
+ || (dr_step < 0
+ && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
{
/* Mark that it is not interleaving. */
- DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;
+ GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
return true;
}
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
return true;
}
+/* Check whether a non-affine read in STMT is suitable for a gather load,
+ and if so, return a builtin decl for that operation. */
+
+tree
+vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
+ tree *offp, int *scalep)
+{
+ HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ tree offtype = NULL_TREE;
+ tree decl, base, off;
+ enum machine_mode pmode;
+ int punsignedp, pvolatilep;
+
+ /* The gather builtins need an address of the form
+ loop_invariant + vector * {1, 2, 4, 8}
+ or
+ loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
+ Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
+ of loop invariants/SSA_NAMEs defined in the loop, with casts,
+ multiplications and additions in it. To get a vector, we need
+ a single SSA_NAME that will be defined in the loop and will
+ contain everything that is not loop invariant and that can be
+ vectorized. The following code attempts to find such a preexisting
+ SSA_NAME OFF and put the loop invariants into a tree BASE
+ that can be gimplified before the loop. */
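+
+ /* Illustration (hypothetical names): for a read p[k[i]] with
+ loop-invariant pointer P we aim for BASE == P plus any constant
+ offset, OFF == the SSA_NAME holding the k[i] value defined in the
+ loop, and SCALE == the element size of *P. */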
+ base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off,
+ &pmode, &punsignedp, &pvolatilep, false);
+ gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
+
+ if (TREE_CODE (base) == MEM_REF)
+ {
+ if (!integer_zerop (TREE_OPERAND (base, 1)))
+ {
+ if (off == NULL_TREE)
+ {
+ double_int moff = mem_ref_offset (base);
+ off = double_int_to_tree (sizetype, moff);
+ }
+ else
+ off = size_binop (PLUS_EXPR, off,
+ fold_convert (sizetype, TREE_OPERAND (base, 1)));
+ }
+ base = TREE_OPERAND (base, 0);
+ }
+ else
+ base = build_fold_addr_expr (base);
+
+ if (off == NULL_TREE)
+ off = size_zero_node;
+
+ /* If base is not loop invariant, then if off is 0 we start with just
+ the constant offset in the loop invariant BASE and continue with base
+ as OFF; otherwise give up.
+ We could handle that case by gimplifying the addition of base + off
+ into some SSA_NAME and use that as off, but for now punt. */
+ if (!expr_invariant_in_loop_p (loop, base))
+ {
+ if (!integer_zerop (off))
+ return NULL_TREE;
+ off = base;
+ base = size_int (pbitpos / BITS_PER_UNIT);
+ }
+ /* Otherwise put base + constant offset into the loop invariant BASE
+ and continue with OFF. */
+ else
+ {
+ base = fold_convert (sizetype, base);
+ base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
+ }
+
+ /* OFF at this point may be either an SSA_NAME or some tree expression
+ from get_inner_reference. Try to peel off loop invariants from it
+ into BASE as long as possible. */
+ STRIP_NOPS (off);
+ while (offtype == NULL_TREE)
+ {
+ enum tree_code code;
+ tree op0, op1, add = NULL_TREE;
+
+ if (TREE_CODE (off) == SSA_NAME)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (off);
+
+ if (expr_invariant_in_loop_p (loop, off))
+ return NULL_TREE;
+
+ if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
+ break;
+
+ op0 = gimple_assign_rhs1 (def_stmt);
+ code = gimple_assign_rhs_code (def_stmt);
+ op1 = gimple_assign_rhs2 (def_stmt);
+ }
+ else
+ {
+ if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
+ return NULL_TREE;
+ code = TREE_CODE (off);
+ extract_ops_from_tree (off, &code, &op0, &op1);
+ }
+ switch (code)
+ {
+ case POINTER_PLUS_EXPR:
+ case PLUS_EXPR:
+ if (expr_invariant_in_loop_p (loop, op0))
+ {
+ add = op0;
+ off = op1;
+ do_add:
+ add = fold_convert (sizetype, add);
+ if (scale != 1)
+ add = size_binop (MULT_EXPR, add, size_int (scale));
+ base = size_binop (PLUS_EXPR, base, add);
+ continue;
+ }
+ if (expr_invariant_in_loop_p (loop, op1))
+ {
+ add = op1;
+ off = op0;
+ goto do_add;
+ }
+ break;
+ case MINUS_EXPR:
+ if (expr_invariant_in_loop_p (loop, op1))
+ {
+ add = fold_convert (sizetype, op1);
+ add = size_binop (MINUS_EXPR, size_zero_node, add);
+ off = op0;
+ goto do_add;
+ }
+ break;
+ case MULT_EXPR:
+ if (scale == 1 && host_integerp (op1, 0))
+ {
+ scale = tree_low_cst (op1, 0);
+ off = op0;
+ continue;
+ }
+ break;
+ case SSA_NAME:
+ off = op0;
+ continue;
+ CASE_CONVERT:
+ if (!POINTER_TYPE_P (TREE_TYPE (op0))
+ && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
+ break;
+ if (TYPE_PRECISION (TREE_TYPE (op0))
+ == TYPE_PRECISION (TREE_TYPE (off)))
+ {
+ off = op0;
+ continue;
+ }
+ if (TYPE_PRECISION (TREE_TYPE (op0))
+ < TYPE_PRECISION (TREE_TYPE (off)))
+ {
+ off = op0;
+ offtype = TREE_TYPE (off);
+ STRIP_NOPS (off);
+ continue;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* If at the end OFF still isn't an SSA_NAME or isn't
+ defined in the loop, punt. */
+ if (TREE_CODE (off) != SSA_NAME
+ || expr_invariant_in_loop_p (loop, off))
+ return NULL_TREE;
+
+ if (offtype == NULL_TREE)
+ offtype = TREE_TYPE (off);
+
+ decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
+ offtype, scale);
+ if (decl == NULL_TREE)
+ return NULL_TREE;
+
+ if (basep)
+ *basep = base;
+ if (offp)
+ *offp = off;
+ if (scalep)
+ *scalep = scale;
+ return decl;
+}
+
/* Function vect_analyze_data_refs.
VEC (data_reference_p, heap) *datarefs;
struct data_reference *dr;
tree scalar_type;
- bool res;
+ bool res, stop_bb_analysis = false;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
res = compute_data_dependences_for_loop
- (loop, true, &LOOP_VINFO_DATAREFS (loop_vinfo),
+ (loop, true,
+ &LOOP_VINFO_LOOP_NEST (loop_vinfo),
+ &LOOP_VINFO_DATAREFS (loop_vinfo),
&LOOP_VINFO_DDRS (loop_vinfo));
if (!res)
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
}
- /* Go through the data-refs, check that the analysis succeeded. Update pointer
- from stmt_vec_info struct to DR and vectype. */
+ /* Go through the data-refs, check that the analysis succeeded. Update
+ pointer from stmt_vec_info struct to DR and vectype. */
- for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
+ FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
{
gimple stmt;
stmt_vec_info stmt_info;
tree base, offset, init;
+ bool gather = false;
int vf;
if (!dr || !DR_REF (dr))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unhandled data-ref ");
+
return false;
}
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
+ if (stop_bb_analysis)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ continue;
+ }
+
/* Check that analysis of the data-ref succeeded. */
if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
- || !DR_STEP (dr))
+ || !DR_STEP (dr))
+ {
+ /* If the target supports vector gather loads, see if one can
+ be used here. */
+ if (loop_vinfo
+ && DR_IS_READ (dr)
+ && !TREE_THIS_VOLATILE (DR_REF (dr))
+ && targetm.vectorize.builtin_gather != NULL
+ && !nested_in_vect_loop_p (loop, stmt))
+ {
+ struct data_reference *newdr
+ = create_data_ref (NULL, loop_containing_stmt (stmt),
+ DR_REF (dr), stmt, true);
+ gcc_assert (newdr != NULL && DR_REF (newdr));
+ if (DR_BASE_ADDRESS (newdr)
+ && DR_OFFSET (newdr)
+ && DR_INIT (newdr)
+ && DR_STEP (newdr)
+ && integer_zerop (DR_STEP (newdr)))
+ {
+ dr = newdr;
+ gather = true;
+ }
+ else
+ free_data_ref (newdr);
+ }
+
+ if (!gather)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump, "not vectorized: data ref analysis "
+ "failed ");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+
+ if (bb_vinfo)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
+ continue;
+ }
+
+ return false;
+ }
+ }
+
+ if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ fprintf (vect_dump, "not vectorized: base addr of dr is a "
+ "constant");
+
+ if (bb_vinfo)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
+ continue;
+ }
+
+ if (gather)
+ free_data_ref (dr);
+ return false;
+ }
+
+ if (TREE_THIS_VOLATILE (DR_REF (dr)))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
- fprintf (vect_dump, "not vectorized: data ref analysis failed ");
+ fprintf (vect_dump, "not vectorized: volatile type ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
if (bb_vinfo)
{
- /* Mark the statement as not vectorizable. */
STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
continue;
}
- else
- return false;
+
+ return false;
}
- if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
+ if (stmt_can_throw_internal (stmt))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
- fprintf (vect_dump, "not vectorized: base addr of dr is a "
- "constant");
+ {
+ fprintf (vect_dump, "not vectorized: statement can throw an "
+ "exception ");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+
if (bb_vinfo)
{
- /* Mark the statement as not vectorizable. */
STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
continue;
}
- else
- return false;
+
+ if (gather)
+ free_data_ref (dr);
+ return false;
}
+ if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump, "not vectorized: statement is bitfield "
+ "access ");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+
+ if (bb_vinfo)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
+ continue;
+ }
+
+ if (gather)
+ free_data_ref (dr);
+ return false;
+ }
+
base = unshare_expr (DR_BASE_ADDRESS (dr));
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
+ if (is_gimple_call (stmt))
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump, "not vectorized: dr in a call ");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+
+ if (bb_vinfo)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
+ continue;
+ }
+
+ if (gather)
+ free_data_ref (dr);
+ return false;
+ }
+
/* Update DR field in stmt_vec_info struct. */
/* If the dataref is in an inner-loop of the loop that is considered for
tree dinit;
/* Build a reference to the first location accessed by the
- inner-loop: *(BASE+INIT). (The first location is actually
+ inner-loop: *(BASE+INIT). (The first location is actually
BASE+INIT+OFFSET, but we add OFFSET separately later). */
tree inner_base = build_fold_indirect_ref
- (fold_build2 (POINTER_PLUS_EXPR,
- TREE_TYPE (base), base,
- fold_convert (sizetype, init)));
+ (fold_build_pointer_plus (base, init));
if (vect_print_dump_info (REPORT_DETAILS))
{
"not vectorized: more than one data ref in stmt: ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
+
+ if (bb_vinfo)
+ {
+ STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
+ continue;
+ }
+
+ if (gather)
+ free_data_ref (dr);
return false;
}
{
/* Mark the statement as not vectorizable. */
STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+ stop_bb_analysis = true;
continue;
}
- else
- return false;
+
+ if (gather)
+ {
+ STMT_VINFO_DATA_REF (stmt_info) = NULL;
+ free_data_ref (dr);
+ }
+ return false;
}
/* Adjust the minimal vectorization factor according to the
vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (vf > *min_vf)
*min_vf = vf;
+
+ if (gather)
+ {
+ unsigned int j, k, n;
+ struct data_reference *olddr
+ = VEC_index (data_reference_p, datarefs, i);
+ VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
+ struct data_dependence_relation *ddr, *newddr;
+ bool bad = false;
+ tree off;
+ VEC (loop_p, heap) *nest = LOOP_VINFO_LOOP_NEST (loop_vinfo);
+
+ if (!vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL)
+ || get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump,
+ "not vectorized: not suitable for gather ");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+ return false;
+ }
+
+ n = VEC_length (data_reference_p, datarefs) - 1;
+ for (j = 0, k = i - 1; j < i; j++)
+ {
+ ddr = VEC_index (ddr_p, ddrs, k);
+ gcc_assert (DDR_B (ddr) == olddr);
+ newddr = initialize_data_dependence_relation (DDR_A (ddr), dr,
+ nest);
+ VEC_replace (ddr_p, ddrs, k, newddr);
+ free_dependence_relation (ddr);
+ if (!bad
+ && DR_IS_WRITE (DDR_A (newddr))
+ && DDR_ARE_DEPENDENT (newddr) != chrec_known)
+ bad = true;
+ k += --n;
+ }
+
+ k++;
+ n = k + VEC_length (data_reference_p, datarefs) - i - 1;
+ for (; k < n; k++)
+ {
+ ddr = VEC_index (ddr_p, ddrs, k);
+ gcc_assert (DDR_A (ddr) == olddr);
+ newddr = initialize_data_dependence_relation (dr, DDR_B (ddr),
+ nest);
+ VEC_replace (ddr_p, ddrs, k, newddr);
+ free_dependence_relation (ddr);
+ if (!bad
+ && DR_IS_WRITE (DDR_B (newddr))
+ && DDR_ARE_DEPENDENT (newddr) != chrec_known)
+ bad = true;
+ }
+
+ k = VEC_length (ddr_p, ddrs)
+ - VEC_length (data_reference_p, datarefs) + i;
+ ddr = VEC_index (ddr_p, ddrs, k);
+ gcc_assert (DDR_A (ddr) == olddr && DDR_B (ddr) == olddr);
+ newddr = initialize_data_dependence_relation (dr, dr, nest);
+ VEC_replace (ddr_p, ddrs, k, newddr);
+ free_dependence_relation (ddr);
+ VEC_replace (data_reference_p, datarefs, i, dr);
+
+ if (bad)
+ {
+ if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+ {
+ fprintf (vect_dump,
+ "not vectorized: data dependence conflict"
+ " prevents gather");
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ }
+ return false;
+ }
+
+ STMT_VINFO_GATHER_P (stmt_info) = true;
+ }
}
return true;
/* Function vect_get_new_vect_var.
- Returns a name for a new variable. The current naming scheme appends the
+ Returns a name for a new variable. The current naming scheme appends the
prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
the name of vectorizer generated variables, and appends that to NAME if
provided. */
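(The body is elided by the diff; a minimal sketch of the naming scheme described above, assuming the libiberty concat and the create_tmp_var helper of this era:)

tree
vect_get_new_vect_var (tree type, enum vect_var_kind var_kind,
                       const char *name)
{
  const char *prefix;
  tree new_vect_var;

  /* Pointer variables get the "vect_p" prefix, other vectorizer
     temporaries "vect_", as described in the comment above.  */
  prefix = (var_kind == vect_pointer_var) ? "vect_p" : "vect_";

  if (name)
    {
      /* Append the caller-supplied NAME to the prefix.  */
      char *tmp = concat (prefix, name, NULL);
      new_vect_var = create_tmp_var (type, tmp);
      free (tmp);
    }
  else
    new_vect_var = create_tmp_var (type, prefix);

  return new_vect_var;
}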
LOOP: Specify relative to which loop-nest the address should be computed.
For example, when the dataref is in an inner-loop nested in an
outer-loop that is now being vectorized, LOOP can be either the
- outer-loop, or the inner-loop. The first memory location accessed
+ outer-loop, or the inner-loop. The first memory location accessed
by the following dataref ('in' points to short):
for (i=0; i<N; i++)
/* base + base_offset */
if (loop_vinfo)
- addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
- data_ref_base, base_offset);
+ addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
else
{
addr_base = build1 (ADDR_EXPR,
vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
base = get_base_address (DR_REF (dr));
if (base
- && INDIRECT_REF_P (base))
+ && TREE_CODE (base) == MEM_REF)
vect_ptr_type
= build_qualified_type (vect_ptr_type,
TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
gimple_seq_add_seq (new_stmt_list, seq);
+ if (DR_PTR_INFO (dr)
+ && TREE_CODE (vec_stmt) == SSA_NAME)
+ {
+ duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
+ if (offset)
+ {
+ SSA_NAME_PTR_INFO (vec_stmt)->align = 1;
+ SSA_NAME_PTR_INFO (vec_stmt)->misalign = 0;
+ }
+ }
+
if (vect_print_dump_info (REPORT_DETAILS))
{
fprintf (vect_dump, "created ");
/* Function vect_create_data_ref_ptr.
- Create a new pointer to vector type (vp), that points to the first location
- accessed in the loop by STMT, along with the def-use update chain to
- appropriately advance the pointer through the loop iterations. Also set
- aliasing information for the pointer. This vector pointer is used by the
- callers to this function to create a memory reference expression for vector
- load/store access.
+ Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
+ location accessed in the loop by STMT, along with the def-use update
+ chain to appropriately advance the pointer through the loop iterations.
+ Also set aliasing information for the pointer. This pointer is used by
+ the callers to this function to create a memory reference expression for
+ vector load/store access.
Input:
1. STMT: a stmt that references memory. Expected to be of the form
GIMPLE_ASSIGN <name, data-ref> or
GIMPLE_ASSIGN <data-ref, name>.
- 2. AT_LOOP: the loop where the vector memref is to be created.
- 3. OFFSET (optional): an offset to be added to the initial address accessed
+ 2. AGGR_TYPE: the type of the reference, which should be either a vector
+ or an array.
+ 3. AT_LOOP: the loop where the vector memref is to be created.
+ 4. OFFSET (optional): an offset to be added to the initial address accessed
by the data-ref in STMT.
- 4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
+ 5. BSI: location where the new stmts are to be placed if there is no loop
+ 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
pointing to the initial address.
- 5. TYPE: if not NULL indicates the required type of the data-ref.
Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
data reference (initial address accessed by the data reference).
For example, for vector of type V8HI, the following code is generated:
- v8hi *vp;
- vp = (v8hi *)initial_address;
+ v8hi *ap;
+ ap = (v8hi *)initial_address;
if OFFSET is not supplied:
initial_address = &a[init];
Return the increment stmt that updates the pointer in PTR_INCR.
3. Set INV_P to true if the access pattern of the data reference in the
- vectorized loop is invariant. Set it to false otherwise.
+ vectorized loop is invariant. Set it to false otherwise.
4. Return the pointer. */
tree
-vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
- tree offset, tree *initial_address, gimple *ptr_incr,
+vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
+ tree offset, tree *initial_address,
+ gimple_stmt_iterator *gsi, gimple *ptr_incr,
bool only_init, bool *inv_p)
{
tree base_name;
struct loop *loop = NULL;
bool nested_in_vect_loop = false;
struct loop *containing_loop = NULL;
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- tree vect_ptr_type;
- tree vect_ptr;
+ tree aggr_ptr_type;
+ tree aggr_ptr;
tree new_temp;
gimple vec_stmt;
gimple_seq new_stmt_list = NULL;
edge pe = NULL;
basic_block new_bb;
- tree vect_ptr_init;
+ tree aggr_ptr_init;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- tree vptr;
+ tree aptr;
gimple_stmt_iterator incr_gsi;
bool insert_after;
+ bool negative;
tree indx_before_incr, indx_after_incr;
gimple incr;
tree step;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
tree base;
+ gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
+ || TREE_CODE (aggr_type) == VECTOR_TYPE);
+
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
*inv_p = true;
else
*inv_p = false;
+ negative = tree_int_cst_compare (step, size_zero_node) < 0;
/* Create an expression for the first address accessed by this load
in LOOP. */
if (vect_print_dump_info (REPORT_DETAILS))
{
tree data_ref_base = base_name;
- fprintf (vect_dump, "create vector-pointer variable to type: ");
- print_generic_expr (vect_dump, vectype, TDF_SLIM);
+ fprintf (vect_dump, "create %s-pointer variable to type: ",
+ tree_code_name[(int) TREE_CODE (aggr_type)]);
+ print_generic_expr (vect_dump, aggr_type, TDF_SLIM);
if (TREE_CODE (data_ref_base) == VAR_DECL
|| TREE_CODE (data_ref_base) == ARRAY_REF)
fprintf (vect_dump, " vectorizing an array ref: ");
print_generic_expr (vect_dump, base_name, TDF_SLIM);
}
- /** (1) Create the new vector-pointer variable: **/
- vect_ptr_type = build_pointer_type (vectype);
+ /* (1) Create the new aggregate-pointer variable. */
+ aggr_ptr_type = build_pointer_type (aggr_type);
base = get_base_address (DR_REF (dr));
if (base
- && INDIRECT_REF_P (base))
- vect_ptr_type
- = build_qualified_type (vect_ptr_type,
+ && TREE_CODE (base) == MEM_REF)
+ aggr_ptr_type
+ = build_qualified_type (aggr_ptr_type,
TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
- vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
get_name (base_name));
- /* Vector types inherit the alias set of their component type by default so
- we need to use a ref-all pointer if the data reference does not conflict
- with the created vector data reference because it is not addressable. */
- if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
+ /* Vector and array types inherit the alias set of their component
+ type by default, so we need to use a ref-all pointer if the data
+ reference does not conflict with the created aggregate data
+ reference because it is not addressable. */
+ if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
get_alias_set (DR_REF (dr))))
{
- vect_ptr_type
- = build_pointer_type_for_mode (vectype,
- TYPE_MODE (vect_ptr_type), true);
- vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ aggr_ptr_type
+ = build_pointer_type_for_mode (aggr_type,
+ TYPE_MODE (aggr_ptr_type), true);
+ aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
get_name (base_name));
}
/* Likewise for any of the data references in the stmt group. */
- else if (STMT_VINFO_DR_GROUP_SIZE (stmt_info) > 1)
+ else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
{
- gimple orig_stmt = STMT_VINFO_DR_GROUP_FIRST_DR (stmt_info);
+ gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
do
{
tree lhs = gimple_assign_lhs (orig_stmt);
- if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
+ if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
get_alias_set (lhs)))
{
- vect_ptr_type
- = build_pointer_type_for_mode (vectype,
- TYPE_MODE (vect_ptr_type), true);
- vect_ptr
- = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ aggr_ptr_type
+ = build_pointer_type_for_mode (aggr_type,
+ TYPE_MODE (aggr_ptr_type), true);
+ aggr_ptr
+ = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
get_name (base_name));
break;
}
- orig_stmt = STMT_VINFO_DR_GROUP_NEXT_DR (vinfo_for_stmt (orig_stmt));
+ orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo_for_stmt (orig_stmt));
}
while (orig_stmt);
}
- add_referenced_var (vect_ptr);
+ add_referenced_var (aggr_ptr);
- /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
- vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
- def-use update cycles for the pointer: One relative to the outer-loop
- (LOOP), which is what steps (3) and (4) below do. The other is relative
- to the inner-loop (which is the inner-most loop containing the dataref),
- and this is done be step (5) below.
+ /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
+ vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
+ def-use update cycles for the pointer: one relative to the outer-loop
+ (LOOP), which is what steps (2) and (3) below do. The other is relative
+ to the inner-loop (which is the inner-most loop containing the dataref),
+ and this is done by step (4) below.
- When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
- inner-most loop, and so steps (3),(4) work the same, and step (5) is
- redundant. Steps (3),(4) create the following:
+ When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
+ inner-most loop, and so steps (2),(3) work the same, and step (4) is
+ redundant. Steps (2),(3) create the following:
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
vp2 = vp1 + step
goto LOOP
- If there is an inner-loop nested in loop, then step (5) will also be
- applied, and an additional update in the inner-loop will be created:
+ If there is an inner-loop nested in loop, then step (4) will also be
+ applied, and an additional update in the inner-loop will be created:
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
vp2 = vp1 + step
if () goto LOOP */
- /** (3) Calculate the initial address the vector-pointer, and set
- the vector-pointer to point to it before the loop: **/
+ /* (2) Calculate the initial address of the aggregate-pointer, and set
+ the aggregate-pointer to point to it before the loop. */
/* Create: &(base[init_val+offset]) in the loop preheader. */
gcc_assert (!new_bb);
}
else
- gsi_insert_seq_before (&gsi, new_stmt_list, GSI_SAME_STMT);
+ gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
}
*initial_address = new_temp;
- /* Create: p = (vectype *) initial_base */
- vec_stmt = gimple_build_assign (vect_ptr,
- fold_convert (vect_ptr_type, new_temp));
- vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
- gimple_assign_set_lhs (vec_stmt, vect_ptr_init);
- if (pe)
+ /* Create: p = (aggr_type *) initial_base */
+ if (TREE_CODE (new_temp) != SSA_NAME
+ || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
{
- new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
- gcc_assert (!new_bb);
+ vec_stmt = gimple_build_assign (aggr_ptr,
+ fold_convert (aggr_ptr_type, new_temp));
+ aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
+ /* Copy the points-to information if it exists. */
+ if (DR_PTR_INFO (dr))
+ duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
+ gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
+ if (pe)
+ {
+ new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
+ gcc_assert (!new_bb);
+ }
+ else
+ gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
}
else
- gsi_insert_before (&gsi, vec_stmt, GSI_SAME_STMT);
+ aggr_ptr_init = new_temp;
- /** (4) Handle the updating of the vector-pointer inside the loop.
- This is needed when ONLY_INIT is false, and also when AT_LOOP
- is the inner-loop nested in LOOP (during outer-loop vectorization).
- **/
+ /* (3) Handle the updating of the aggregate-pointer inside the loop.
+ This is needed when ONLY_INIT is false, and also when AT_LOOP is the
+ inner-loop nested in LOOP (during outer-loop vectorization). */
/* No update in loop is required. */
if (only_init && (!loop_vinfo || at_loop == loop))
- {
- /* Copy the points-to information if it exists. */
- if (DR_PTR_INFO (dr))
- duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
- vptr = vect_ptr_init;
- }
+ aptr = aggr_ptr_init;
else
{
- /* The step of the vector pointer is the Vector Size. */
- tree step = TYPE_SIZE_UNIT (vectype);
+ /* The step of the aggregate pointer is the type size. */
+ tree step = TYPE_SIZE_UNIT (aggr_type);
/* One exception to the above is when the scalar step of the load in
LOOP is zero. In this case the step here is also zero. */
if (*inv_p)
step = size_zero_node;
+ else if (negative)
+ step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
standard_iv_increment_position (loop, &incr_gsi, &insert_after);
- create_iv (vect_ptr_init,
- fold_convert (vect_ptr_type, step),
- vect_ptr, loop, &incr_gsi, insert_after,
+ create_iv (aggr_ptr_init,
+ fold_convert (aggr_ptr_type, step),
+ aggr_ptr, loop, &incr_gsi, insert_after,
&indx_before_incr, &indx_after_incr);
incr = gsi_stmt (incr_gsi);
set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
if (ptr_incr)
*ptr_incr = incr;
- vptr = indx_before_incr;
+ aptr = indx_before_incr;
}
if (!nested_in_vect_loop || only_init)
- return vptr;
+ return aptr;
- /** (5) Handle the updating of the vector-pointer inside the inner-loop
- nested in LOOP, if exists: **/
+ /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
+ nested in LOOP, if it exists. */
gcc_assert (nested_in_vect_loop);
if (!only_init)
{
standard_iv_increment_position (containing_loop, &incr_gsi,
&insert_after);
- create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
+ create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
containing_loop, &incr_gsi, insert_after, &indx_before_incr,
&indx_after_incr);
incr = gsi_stmt (incr_gsi);
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
- duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
+ {
+ duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
+ SSA_NAME_PTR_INFO (new_dataref_ptr)->align = 1;
+ SSA_NAME_PTR_INFO (new_dataref_ptr)->misalign = 0;
+ }
if (!ptr_incr)
return new_dataref_ptr;
/* Function vect_strided_store_supported.
- Returns TRUE is INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
- and FALSE otherwise. */
+ Returns TRUE if interleave high and interleave low permutations
+ are supported, and FALSE otherwise. */
bool
-vect_strided_store_supported (tree vectype)
+vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
- optab interleave_high_optab, interleave_low_optab;
- int mode;
-
- mode = (int) TYPE_MODE (vectype);
+ enum machine_mode mode = TYPE_MODE (vectype);
- /* Check that the operation is supported. */
- interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
- vectype, optab_default);
- interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
- vectype, optab_default);
- if (!interleave_high_optab || !interleave_low_optab)
+ /* vect_permute_store_chain requires the group size to be a power of two. */
+ if (exact_log2 (count) == -1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "no optab for interleave.");
+ fprintf (vect_dump, "the size of the group of strided accesses"
+ " is not a power of 2");
return false;
}
- if (optab_handler (interleave_high_optab, mode)->insn_code
- == CODE_FOR_nothing
- || optab_handler (interleave_low_optab, mode)->insn_code
- == CODE_FOR_nothing)
+ /* Check that the permutation is supported. */
+ if (VECTOR_MODE_P (mode))
{
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "interleave op not supported by target.");
- return false;
+ unsigned int i, nelt = GET_MODE_NUNITS (mode);
+ unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
+ for (i = 0; i < nelt / 2; i++)
+ {
+ sel[i * 2] = i;
+ sel[i * 2 + 1] = i + nelt;
+ }
+ if (can_vec_perm_p (mode, false, sel))
+ {
+ for (i = 0; i < nelt; i++)
+ sel[i] += nelt / 2;
+ if (can_vec_perm_p (mode, false, sel))
+ return true;
+ }
}
- return true;
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "interleave op not supported by target.");
+ return false;
+}
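+
+/* For example, for V4SI (nelt == 4) the loop above builds the
+ interleave-high selector {0, 4, 1, 5}; adding nelt/2 to each element
+ then yields the interleave-low selector {2, 6, 3, 7}. */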
+
+
+/* Return TRUE if vec_store_lanes is available for COUNT vectors of
+ type VECTYPE. */
+
+bool
+vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
+{
+ return vect_lanes_optab_supported_p ("vec_store_lanes",
+ vec_store_lanes_optab,
+ vectype, count);
}
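+
+/* In practice a target provides vec_store_lanes when it has
+ single-instruction interleaved stores, e.g. the ARM NEON
+ vst2/vst3/vst4 family. */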
Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
a power of 2, generate interleave_high/low stmts to reorder the data
- correctly for the stores. Return the final references for stores in
+ correctly for the stores. Return the final references for stores in
RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
- The input is 4 vectors each containing 8 elements. We assign a number to each
- element, the input sequence is:
+ The input is 4 vectors each containing 8 elements. We assign a number to
+ each element; the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
i.e., we interleave the contents of the four vectors in their order.
- We use interleave_high/low instructions to create such output. The input of
+ We use interleave_high/low instructions to create such output. The input of
each interleave_high/low operation is two vectors:
1st vec 2nd vec
0 1 2 3 4 5 6 7
the even elements of the result vector are obtained left-to-right from the
- high/low elements of the first vector. The odd elements of the result are
+ high/low elements of the first vector. The odd elements of the result are
obtained left-to-right from the high/low elements of the second vector.
The output of interleave_high will be: 0 4 1 5
and of interleave_low: 2 6 3 7
- The permutation is done in log LENGTH stages. In each stage interleave_high
+ The permutation is done in log LENGTH stages. In each stage interleave_high
and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
I3: 4 12 20 28 5 13 21 30
I4: 6 14 22 30 7 15 23 31. */
-bool
+void
vect_permute_store_chain (VEC(tree,heap) *dr_chain,
unsigned int length,
gimple stmt,
tree perm_dest, vect1, vect2, high, low;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- int i;
- unsigned int j;
- enum tree_code high_code, low_code;
-
- /* Check that the operation is supported. */
- if (!vect_strided_store_supported (vectype))
- return false;
+ tree perm_mask_low, perm_mask_high;
+ unsigned int i, n;
+ unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
*result_chain = VEC_copy (tree, heap, dr_chain);
- for (i = 0; i < exact_log2 (length); i++)
+ for (i = 0, n = nelt / 2; i < n; i++)
+ {
+ sel[i * 2] = i;
+ sel[i * 2 + 1] = i + nelt;
+ }
+ perm_mask_high = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask_high != NULL);
+
+ for (i = 0; i < nelt; i++)
+ sel[i] += nelt / 2;
+ perm_mask_low = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask_low != NULL);
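+
+ /* For nelt == 8, perm_mask_high selects elements {0,8,1,9,2,10,3,11}
+ and perm_mask_low selects {4,12,5,13,6,14,7,15} from the
+ concatenation of the two input vectors. */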
+
+ for (i = 0, n = exact_log2 (length); i < n; i++)
{
for (j = 0; j < length/2; j++)
{
vect2 = VEC_index (tree, dr_chain, j+length/2);
/* Create interleaving stmt:
- in the case of big endian:
- high = interleave_high (vect1, vect2)
- and in the case of little endian:
- high = interleave_low (vect1, vect2). */
+ high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, ...}> */
perm_dest = create_tmp_var (vectype, "vect_inter_high");
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
- if (BYTES_BIG_ENDIAN)
- {
- high_code = VEC_INTERLEAVE_HIGH_EXPR;
- low_code = VEC_INTERLEAVE_LOW_EXPR;
- }
- else
- {
- low_code = VEC_INTERLEAVE_HIGH_EXPR;
- high_code = VEC_INTERLEAVE_LOW_EXPR;
- }
- perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
- vect1, vect2);
- high = make_ssa_name (perm_dest, perm_stmt);
- gimple_assign_set_lhs (perm_stmt, high);
+ high = make_ssa_name (perm_dest, NULL);
+ perm_stmt
+ = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, high,
+ vect1, vect2, perm_mask_high);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
VEC_replace (tree, *result_chain, 2*j, high);
/* Create interleaving stmt:
- in the case of big endian:
- low = interleave_low (vect1, vect2)
- and in the case of little endian:
- low = interleave_high (vect1, vect2). */
+ low = VEC_PERM_EXPR <vect1, vect2, {nelt/2, nelt*3/2, nelt/2+1,
+ nelt*3/2+1, ...}> */
perm_dest = create_tmp_var (vectype, "vect_inter_low");
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
- perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
- vect1, vect2);
- low = make_ssa_name (perm_dest, perm_stmt);
- gimple_assign_set_lhs (perm_stmt, low);
+ low = make_ssa_name (perm_dest, NULL);
+ perm_stmt
+ = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, low,
+ vect1, vect2, perm_mask_low);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
VEC_replace (tree, *result_chain, 2*j+1, low);
}
dr_chain = VEC_copy (tree, heap, *result_chain);
}
- return true;
}
/* Function vect_setup_realignment
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- edge pe;
+ struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+ struct loop *loop = NULL;
+ edge pe = NULL;
tree scalar_dest = gimple_assign_lhs (stmt);
tree vec_dest;
gimple inc;
gimple_seq stmts = NULL;
bool inv_p;
bool compute_in_loop = false;
- bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+ bool nested_in_vect_loop = false;
struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
- struct loop *loop_for_initial_load;
+ struct loop *loop_for_initial_load = NULL;
+
+ if (loop_vinfo)
+ {
+ loop = LOOP_VINFO_LOOP (loop_vinfo);
+ nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+ }
gcc_assert (alignment_support_scheme == dr_explicit_realign
|| alignment_support_scheme == dr_explicit_realign_optimized);
1. the misalignment computation
2. the extra vector load (for the optimized realignment scheme).
3. the phi node for the two vectors from which the realignment is
- done (for the optimized realignment scheme).
- */
+ done (for the optimized realignment scheme). */
/* 1. Determine where to generate the misalignment computation.
or not, which in turn determines if the misalignment is computed inside
the inner-loop, or outside LOOP. */
- if (init_addr != NULL_TREE)
+ if (init_addr != NULL_TREE || !loop_vinfo)
{
compute_in_loop = true;
gcc_assert (alignment_support_scheme == dr_explicit_realign);
if (at_loop)
*at_loop = loop_for_initial_load;
+ if (loop_for_initial_load)
+ pe = loop_preheader_edge (loop_for_initial_load);
+
/* 3. For the case of the optimized realignment, create the first vector
load at the loop preheader. */
/* Create msq_init = *(floor(p1)) in the loop preheader */
gcc_assert (!compute_in_loop);
- pe = loop_preheader_edge (loop_for_initial_load);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
- &init_addr, &inc, true, &inv_p);
- data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
+ ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
+ NULL_TREE, &init_addr, NULL, &inc,
+ true, &inv_p);
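+ /* Compute floor(p1) by masking off the low address bits:
+ ptr & -TYPE_ALIGN_UNIT (vectype). */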
+ new_stmt = gimple_build_assign_with_ops
+ (BIT_AND_EXPR, NULL_TREE, ptr,
+ build_int_cst (TREE_TYPE (ptr),
+ -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
+ new_temp = make_ssa_name (SSA_NAME_VAR (ptr), new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ data_ref
+ = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
+ build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
mark_symbols_for_renaming (new_stmt);
- new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
- gcc_assert (!new_bb);
+ if (pe)
+ {
+ new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
+ gcc_assert (!new_bb);
+ }
+ else
+ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
+
msq_init = gimple_assign_lhs (new_stmt);
}
tree builtin_decl;
/* Compute INIT_ADDR - the initial address accessed by this memref. */
- if (compute_in_loop)
- gcc_assert (init_addr); /* already computed by the caller. */
- else
+ if (!init_addr)
{
/* Generate the INIT_ADDR computation outside LOOP. */
init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
NULL_TREE, loop);
- pe = loop_preheader_edge (loop);
- new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
+ if (loop)
+ {
+ pe = loop_preheader_edge (loop);
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
+ gcc_assert (!new_bb);
+ }
+ else
+ gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
}
builtin_decl = targetm.vectorize.builtin_mask_for_load ();
/* Function vect_strided_load_supported.
- Returns TRUE is EXTRACT_EVEN and EXTRACT_ODD operations are supported,
+ Returns TRUE if even and odd permutations are supported,
and FALSE otherwise. */
bool
-vect_strided_load_supported (tree vectype)
+vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
- optab perm_even_optab, perm_odd_optab;
- int mode;
-
- mode = (int) TYPE_MODE (vectype);
+ enum machine_mode mode = TYPE_MODE (vectype);
- perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype,
- optab_default);
- if (!perm_even_optab)
+ /* vect_permute_load_chain requires the group size to be a power of two. */
+ if (exact_log2 (count) == -1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "no optab for perm_even.");
+ fprintf (vect_dump, "the size of the group of strided accesses"
+ " is not a power of 2");
return false;
}
- if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing)
+ /* Check that the permutation is supported. */
+ if (VECTOR_MODE_P (mode))
{
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "perm_even op not supported by target.");
- return false;
- }
+ unsigned int i, nelt = GET_MODE_NUNITS (mode);
+ unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
- perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype,
- optab_default);
- if (!perm_odd_optab)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "no optab for perm_odd.");
- return false;
+ for (i = 0; i < nelt; i++)
+ sel[i] = i * 2;
+ if (can_vec_perm_p (mode, false, sel))
+ {
+ for (i = 0; i < nelt; i++)
+ sel[i] = i * 2 + 1;
+ if (can_vec_perm_p (mode, false, sel))
+ return true;
+ }
}
- if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing)
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "perm_odd op not supported by target.");
- return false;
- }
- return true;
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "extract even/odd not supported by target");
+ return false;
}
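+
+/* For example, for V4SI (nelt == 4) the selectors tested above are
+ {0, 2, 4, 6} (extract even) and {1, 3, 5, 7} (extract odd). */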
+/* Return TRUE if vec_load_lanes is available for COUNT vectors of
+ type VECTYPE. */
+
+bool
+vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
+{
+ return vect_lanes_optab_supported_p ("vec_load_lanes",
+ vec_load_lanes_optab,
+ vectype, count);
+}
/* Function vect_permute_load_chain.
Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
a power of 2, generate extract_even/odd stmts to reorder the input data
- correctly. Return the final references for loads in RESULT_CHAIN.
+ correctly. Return the final references for loads in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
The input is 4 vectors each containing 8 elements. We assign a number to each
i.e., the first output vector should contain the first elements of each
interleaving group, etc.
- We use extract_even/odd instructions to create such output. The input of each
- extract_even/odd operation is two vectors
+ We use extract_even/odd instructions to create such output. The input of
+ each extract_even/odd operation is two vectors
1st vec 2nd vec
0 1 2 3 4 5 6 7
- and the output is the vector of extracted even/odd elements. The output of
+ and the output is the vector of extracted even/odd elements. The output of
extract_even will be: 0 2 4 6
and of extract_odd: 1 3 5 7
- The permutation is done in log LENGTH stages. In each stage extract_even and
- extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
- order. In our example,
+ The permutation is done in log LENGTH stages. In each stage extract_even
+ and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
+ their order. In our example,
E1: extract_even (1st vec, 2nd vec)
E2: extract_odd (1st vec, 2nd vec)
3rd vec (E2): 2 6 10 14 18 22 26 30
4th vec (E4): 3 7 11 15 19 23 27 31. */
-bool
+static void
vect_permute_load_chain (VEC(tree,heap) *dr_chain,
unsigned int length,
gimple stmt,
VEC(tree,heap) **result_chain)
{
tree perm_dest, data_ref, first_vect, second_vect;
+ tree perm_mask_even, perm_mask_odd;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- int i;
- unsigned int j;
-
- /* Check that the operation is supported. */
- if (!vect_strided_load_supported (vectype))
- return false;
+ unsigned int i, j, log_length = exact_log2 (length);
+ unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
*result_chain = VEC_copy (tree, heap, dr_chain);
- for (i = 0; i < exact_log2 (length); i++)
+
+ for (i = 0; i < nelt; ++i)
+ sel[i] = i * 2;
+ perm_mask_even = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask_even != NULL);
+
+ for (i = 0; i < nelt; ++i)
+ sel[i] = i * 2 + 1;
+ perm_mask_odd = vect_gen_perm_mask (vectype, sel);
+ gcc_assert (perm_mask_odd != NULL);
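+
+ /* For nelt == 8 these are {0,2,4,6,8,10,12,14} (even) and
+ {1,3,5,7,9,11,13,15} (odd). */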
+
+ for (i = 0; i < log_length; i++)
{
- for (j = 0; j < length; j +=2)
+ for (j = 0; j < length; j += 2)
{
first_vect = VEC_index (tree, dr_chain, j);
second_vect = VEC_index (tree, dr_chain, j+1);
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
- perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
- perm_dest, first_vect,
- second_vect);
+ perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
+ first_vect, second_vect,
+ perm_mask_even);
data_ref = make_ssa_name (perm_dest, perm_stmt);
gimple_assign_set_lhs (perm_stmt, data_ref);
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
- perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
- perm_dest, first_vect,
- second_vect);
+ perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
+ first_vect, second_vect,
+ perm_mask_odd);
+
data_ref = make_ssa_name (perm_dest, perm_stmt);
gimple_assign_set_lhs (perm_stmt, data_ref);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
}
dr_chain = VEC_copy (tree, heap, *result_chain);
}
- return true;
}
the scalar statements.
*/
-bool
+void
vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
gimple_stmt_iterator *gsi)
{
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
- gimple next_stmt, new_stmt;
VEC(tree,heap) *result_chain = NULL;
- unsigned int i, gap_count;
- tree tmp_data_ref;
/* DR_CHAIN contains input data-refs that are a part of the interleaving.
RESULT_CHAIN is the output of vect_permute_load_chain; it contains the
permuted vectors that are ready for vector computation. */
result_chain = VEC_alloc (tree, heap, size);
- /* Permute. */
- if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
- return false;
+ vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
+ vect_record_strided_load_vectors (stmt, result_chain);
+ VEC_free (tree, heap, result_chain);
+}
+
+/* RESULT_CHAIN contains the output of a group of strided loads that were
+ generated as part of the vectorization of STMT. Assign the statement
+ for each vector to the associated scalar statement. */
+
+void
+vect_record_strided_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
+{
+ gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
+ gimple next_stmt, new_stmt;
+ unsigned int i, gap_count;
+ tree tmp_data_ref;
/* Put a permuted data-ref in the VECTORIZED_STMT field.
Since we scan the chain starting from its first node, their order
corresponds to the order of data-refs in RESULT_CHAIN. */
next_stmt = first_stmt;
gap_count = 1;
- for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++)
+ FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
{
if (!next_stmt)
break;
- /* Skip the gaps. Loads created for the gaps will be removed by dead
- code elimination pass later. No need to check for the first stmt in
+ /* Skip the gaps. Loads created for the gaps will be removed by the
+ dead code elimination pass later. No need to check for the first stmt in
the group, since it always exists.
- DR_GROUP_GAP is the number of steps in elements from the previous
- access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
- correspond to the gaps.
- */
+ GROUP_GAP is the number of steps in elements from the previous
+ access (if there is no gap GROUP_GAP is 1). We skip loads that
+ correspond to the gaps. */
if (next_stmt != first_stmt
- && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
+ && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
{
gap_count++;
continue;
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
else
{
- if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
{
gimple prev_stmt =
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
}
}
- next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
+ next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
gap_count = 1;
/* If NEXT_STMT accesses the same DR as the previous statement,
put the same TMP_DATA_REF as its vectorized statement; otherwise
get the next data-ref from RESULT_CHAIN. */
- if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
break;
}
}
-
- VEC_free (tree, heap, result_chain);
- return true;
}
/* Function vect_force_dr_alignment_p.
if (TREE_ASM_WRITTEN (decl))
return false;
+ /* Do not override explicit alignment set by the user when an explicit
+ section name is also used. This is a common idiom used by many
+ software projects. */
+ if (DECL_SECTION_NAME (decl) != NULL_TREE
+ && !DECL_HAS_IMPLICIT_SECTION_NAME_P (decl))
+ return false;
+
if (TREE_STATIC (decl))
return (alignment <= MAX_OFILE_ALIGNMENT);
else
if (aligned_access_p (dr) && !check_aligned_accesses)
return dr_aligned;
- if (!loop_vinfo)
- /* FORNOW: Misaligned accesses are supported only in loops. */
- return dr_unaligned_unsupported;
-
- vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
- nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
+ if (loop_vinfo)
+ {
+ vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
+ nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
+ }
/* Possibly unaligned access. */
/* We can choose between using the implicit realignment scheme (generating
a misaligned_move stmt) and the explicit realignment scheme (generating
- aligned loads with a REALIGN_LOAD). There are two variants to the explicit
- realignment scheme: optimized, and unoptimized.
+ aligned loads with a REALIGN_LOAD). There are two variants to the
+ explicit realignment scheme: optimized, and unoptimized.
We can optimize the realignment only if the step between consecutive
vector loads is equal to the vector size. Since the vector memory
accesses advance in steps of VS (Vector Size) in the vectorized loop, it
bool is_packed = false;
tree type = (TREE_TYPE (DR_REF (dr)));
- if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
- CODE_FOR_nothing
+ if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
&& (!targetm.vectorize.builtin_mask_for_load
|| targetm.vectorize.builtin_mask_for_load ()))
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- if (nested_in_vect_loop
- && (TREE_INT_CST_LOW (DR_STEP (dr))
- != GET_MODE_SIZE (TYPE_MODE (vectype))))
+ if ((nested_in_vect_loop
+ && (TREE_INT_CST_LOW (DR_STEP (dr))
+ != GET_MODE_SIZE (TYPE_MODE (vectype))))
+ || !loop_vinfo)
return dr_explicit_realign;
else
return dr_explicit_realign_optimized;
}
if (!known_alignment_for_access_p (dr))
- {
- tree ba = DR_BASE_OBJECT (dr);
-
- if (ba)
- is_packed = contains_packed_reference (ba);
- }
+ is_packed = contains_packed_reference (DR_REF (dr));
if (targetm.vectorize.
support_vector_misalignment (mode, type,
tree type = (TREE_TYPE (DR_REF (dr)));
if (!known_alignment_for_access_p (dr))
- {
- tree ba = DR_BASE_OBJECT (dr);
-
- if (ba)
- is_packed = contains_packed_reference (ba);
- }
+ is_packed = contains_packed_reference (DR_REF (dr));
if (targetm.vectorize.
support_vector_misalignment (mode, type,