-/* Data References Analysis and Manipulation Utilities for Vectorization.
+/* Data References Analysis and Manipulation Utilities for Vectorization.
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
/* Return the smallest scalar part of STMT.
- This is used to determine the vectype of the stmt. We generally set the
- vectype according to the type of the result (lhs). For stmts whose
+ This is used to determine the vectype of the stmt. We generally set the
+ vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
- promotion), vectype will be reset appropriately (later). Note that we have
+ promotion), vectype will be reset appropriately (later). Note that we have
to visit the smallest datatype in this function, because that determines the
- VF. If the smallest datatype in the loop is present only as the rhs of a
+ VF. If the smallest datatype in the loop is present only as the rhs of a
promotion operation - we'd miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
- 'int_x = (int) short_inv', which we'd expect to have been optimized away by
+ 'int_x = (int) short_inv', which we'd expect to have been optimized away by
invariant motion. However, we cannot rely on invariant motion to always take
invariants out of the loop, and so in the case of promotion we also have to
- check the rhs.
+ check the rhs.
LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
types. */
if (rhs < lhs)
scalar_type = rhs_type;
}
-
- *lhs_size_unit = lhs;
+
+ *lhs_size_unit = lhs;
*rhs_size_unit = rhs;
return scalar_type;
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
-int
+int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
gimple next_stmt = first_stmt;
{
gimple prev, next;
tree next_init;
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
prev = DR_GROUP_FIRST_DR (stmtinfo_b);
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
while (next)
{
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
/* Function vect_update_interleaving_chain.
-
- For two data-refs DRA and DRB that are a part of a chain interleaved data
+
+ For two data-refs DRA and DRB that are a part of a chain interleaved data
accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
There are four possible cases:
vect_update_interleaving_chain (struct data_reference *drb,
struct data_reference *dra)
{
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
tree next_init, init_dra_chain, init_drb_chain;
gimple first_a, first_b;
return;
}
- /* 3. DRA is a part of a chain and DRB is not. */
+ /* 3. DRA is a part of a chain and DRB is not. */
if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
{
gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a);
if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
{
- /* DRB's init is smaller than the init of the stmt previously marked
- as the first stmt of the interleaving chain of DRA. Therefore, we
+ /* DRB's init is smaller than the init of the stmt previously marked
+ as the first stmt of the interleaving chain of DRA. Therefore, we
update FIRST_STMT and put DRB in the head of the list. */
DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt;
-
+
/* Update all the stmts in the list to point to the new FIRST_STMT. */
tmp = old_first_stmt;
while (tmp)
{
/* Insert DRB in the list of DRA. */
vect_insert_into_interleaving_chain (drb, dra);
- DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
+ DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
}
return;
}
-
+
/* 4. both DRA and DRB are in some interleaving chains. */
first_a = DR_GROUP_FIRST_DR (stmtinfo_a);
first_b = DR_GROUP_FIRST_DR (stmtinfo_b);
if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
{
- /* Insert the nodes of DRA chain into the DRB chain.
+ /* Insert the nodes of DRA chain into the DRB chain.
After inserting a node, continue from this node of the DRB chain (don't
start from the beginning). */
node = DR_GROUP_FIRST_DR (stmtinfo_a);
- prev = DR_GROUP_FIRST_DR (stmtinfo_b);
+ prev = DR_GROUP_FIRST_DR (stmtinfo_b);
first_stmt = first_b;
}
else
{
- /* Insert the nodes of DRB chain into the DRA chain.
+ /* Insert the nodes of DRB chain into the DRA chain.
After inserting a node, continue from this node of the DRA chain (don't
start from the beginning). */
node = DR_GROUP_FIRST_DR (stmtinfo_b);
- prev = DR_GROUP_FIRST_DR (stmtinfo_a);
+ prev = DR_GROUP_FIRST_DR (stmtinfo_a);
first_stmt = first_a;
}
-
+
while (node)
{
node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
while (next)
- {
+ {
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (next_init, node_init) > 0)
{
DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL;
prev = node;
- }
+ }
DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt;
- node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
+ node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
}
}
if (TREE_CODE (offset1) != TREE_CODE (offset2)
|| !BINARY_CLASS_P (offset1)
- || !BINARY_CLASS_P (offset2))
+ || !BINARY_CLASS_P (offset2))
return false;
-
- res0 = vect_equal_offsets (TREE_OPERAND (offset1, 0),
+
+ res0 = vect_equal_offsets (TREE_OPERAND (offset1, 0),
TREE_OPERAND (offset2, 0));
- res1 = vect_equal_offsets (TREE_OPERAND (offset1, 1),
+ res1 = vect_equal_offsets (TREE_OPERAND (offset1, 1),
TREE_OPERAND (offset2, 1));
return (res0 && res1);
Check if DRA and DRB are a part of interleaving. In case they are, insert
DRA and DRB in an interleaving chain. */
-static bool
+static bool
vect_check_interleaving (struct data_reference *dra,
struct data_reference *drb)
{
/* Check that the data-refs have same first location (except init) and they
are both either store or load (not load and store). */
if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
- && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
+ && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
|| TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
- || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
+ || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
!= TREE_OPERAND (DR_BASE_ADDRESS (drb),0)))
|| !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb))
- || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
+ || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
|| DR_IS_READ (dra) != DR_IS_READ (drb))
return false;
/* Check:
1. data-refs are of the same type
2. their steps are equal
- 3. the step (if greater than zero) is greater than the difference between
+ 3. the step (if greater than zero) is greater than the difference between
data-refs' inits. */
type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
if (type_size_a != type_size_b
|| tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
- || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
+ || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
TREE_TYPE (DR_REF (drb))))
return false;
if (init_a > init_b)
{
- /* If init_a == init_b + the size of the type * k, we have an interleaving,
+ /* If init_a == init_b + the size of the type * k, we have an interleaving,
and DRB is accessed before DRA. */
diff_mod_size = (init_a - init_b) % type_size_a;
if (step && (init_a - init_b) > step)
- return false;
+ return false;
if (diff_mod_size == 0)
{
- vect_update_interleaving_chain (drb, dra);
+ vect_update_interleaving_chain (drb, dra);
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected interleaving ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
return true;
- }
+ }
}
- else
+ else
{
- /* If init_b == init_a + the size of the type * k, we have an
+ /* If init_b == init_a + the size of the type * k, we have an
interleaving, and DRA is accessed before DRB. */
diff_mod_size = (init_b - init_a) % type_size_a;
if (diff_mod_size == 0)
{
- vect_update_interleaving_chain (dra, drb);
+ vect_update_interleaving_chain (dra, drb);
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected interleaving ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
return true;
- }
+ }
}
-
+
return false;
}
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB. When versioning for alias may check a
dependence at run-time, return FALSE. */
-
+
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo)
int vectorization_factor = 0;
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
lambda_vector dist_v;
unsigned int loop_depth;
-
+
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
{
/* Independent data accesses. */
if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
return false;
-
+
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
- if (loop_vinfo)
+ if (loop_vinfo)
{
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, " and ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
-
+
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
}
/* Versioning for alias is not yet supported for basic block SLP, and
- dependence distance is unapplicable, hence, in case of known data
+ dependence distance is inapplicable, hence, in case of known data
dependence, basic block vectorization is impossible for now. */
if (!loop_vinfo)
{
if (dra != drb && vect_check_interleaving (dra, drb))
return false;
-
+
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "determined dependence between ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
- return true;
+ return true;
}
/* Loop-based vectorization and known data dependence. */
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
- }
+ }
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
}
/* For interleaving, mark that there is a read-write dependency if
- necessary. We check before that one of the data-refs is store. */
+ necessary. We check before that one of the data-refs is store. */
if (DR_IS_READ (dra))
DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
else
if (DR_IS_READ (drb))
DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
}
-
+
continue;
}
- if (abs (dist) >= vectorization_factor
+ if (abs (dist) >= vectorization_factor
|| (dist > 0 && DDR_REVERSED_P (ddr)))
{
- /* Dependence distance does not create dependence, as far as
- vectorization is concerned, in this case. If DDR_REVERSED_P the
+ /* Dependence distance does not create dependence, as far as
+ vectorization is concerned, in this case. If DDR_REVERSED_P the
order of the data-refs in DDR was reversed (to make distance
vector positive), and the actual distance is negative. */
if (vect_print_dump_info (REPORT_DR_DETAILS))
}
/* Function vect_analyze_data_ref_dependences.
-
+
Examine all the data references in the loop, and make sure there do not
exist any data dependences between them. */
-
+
bool
-vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
+vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
unsigned int i;
VEC (ddr_p, heap) *ddrs = NULL;
struct data_dependence_relation *ddr;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_dependences ===");
-
+
if (loop_vinfo)
ddrs = LOOP_VINFO_DDRS (loop_vinfo);
else
ddrs = BB_VINFO_DDRS (bb_vinfo);
-
+
for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo))
return false;
vect_compute_data_ref_alignment (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
tree ref = DR_REF (dr);
bool base_aligned;
tree misalign;
tree aligned_to, alignment;
-
+
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "vect_compute_data_ref_alignment:");
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
-
+
/* Initialize misalignment to unknown. */
SET_DR_MISALIGNMENT (dr, -1);
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
-
+
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
if (vect_print_dump_info (REPORT_ALIGNMENT))
return true;
}
- if ((DECL_P (base)
+ if ((DECL_P (base)
&& tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
alignment) >= 0)
|| (TREE_CODE (base_addr) == SSA_NAME
alignment) >= 0))
base_aligned = true;
else
- base_aligned = false;
+ base_aligned = false;
- if (!base_aligned)
+ if (!base_aligned)
{
- /* Do not change the alignment of global variables if
+ /* Do not change the alignment of global variables if
flag_section_anchors is enabled. */
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
|| (TREE_STATIC (base) && flag_section_anchors))
}
return true;
}
-
+
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
/* At this point we assume that the base is aligned. */
gcc_assert (base_aligned
- || (TREE_CODE (base) == VAR_DECL
+ || (TREE_CODE (base) == VAR_DECL
&& DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
/* Modulo alignment. */
Return FALSE if a data reference is found that cannot be vectorized. */
static bool
-vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
+vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
VEC (data_reference_p, heap) *datarefs;
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
-
+
for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
if (!vect_compute_data_ref_alignment (dr))
return false;
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
if (DR_IS_READ (dr))
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: unsupported unaligned load.");
else
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: unsupported unaligned store.");
}
return false;
only if natural alignment is reachable through peeling. */
if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
{
- HOST_WIDE_INT elmsize =
+ HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (vect_print_dump_info (REPORT_DETAILS))
{
/* While cost model enhancements are expected in the future, the high level
view of the code at this time is as follows:
- A) If there is an unsupported misaligned access then see if peeling
- to align this access can make all data references satisfy
+ A) If there is a misaligned access then see if peeling to align
+ this access can make all data references satisfy
vect_supportable_dr_alignment. If so, update data structures
as needed and return true.
+ How many accesses will become aligned due to the peeling
- How many accesses will become unaligned due to the peeling,
and the cost of misaligned accesses.
- - The cost of peeling (the extra runtime checks, the increase
+ - The cost of peeling (the extra runtime checks, the increase
in code size).
The scheme we use FORNOW: peel to force the alignment of the first
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
- supportable_dr_alignment = vect_supportable_dr_alignment (dr);
/* For interleaving, only the alignment of the first access
matters. */
&& DR_GROUP_FIRST_DR (stmt_info) != stmt)
continue;
- if (!supportable_dr_alignment)
+ if (!DR_IS_READ (dr) && !aligned_access_p (dr))
{
do_peeling = vector_alignment_reachable_p (dr);
if (do_peeling)
}
}
- vect_versioning_for_alias_required
+ vect_versioning_for_alias_required
= LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
/* Temporarily, if versioning for alias is required, we disable peeling
mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
npeel = nelements - mis;
- /* For interleaved data access every iteration accesses all the
+ /* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
- stmt_info = vinfo_for_stmt (DR_STMT (dr0));
+ stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
npeel /= DR_GROUP_SIZE (stmt_info);
vect_update_misalignment_for_peel (dr, dr0, npeel);
supportable_dr_alignment = vect_supportable_dr_alignment (dr);
SET_DR_MISALIGNMENT (dr, save_misalignment);
-
+
if (!supportable_dr_alignment)
{
do_peeling = false;
4) all misaligned data refs with a known misalignment are supported, and
5) the number of runtime alignment checks is within reason. */
- do_versioning =
- flag_tree_vect_loop_version
+ do_versioning =
+ flag_tree_vect_loop_version
&& optimize_loop_nest_for_speed_p (loop)
&& (!loop->inner); /* FORNOW */
stmt = DR_STMT (dr);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
-
+
/* The rightmost bits of an aligned address must be zeros.
Construct the mask needed for this test. For example,
GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
DR_STMT (dr));
}
}
-
+
/* Versioning requires at least one misaligned data reference. */
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false;
Return FALSE if a data reference is found that cannot be vectorized. */
bool
-vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
+vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
if (vect_print_dump_info (REPORT_DETAILS))
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: can't calculate alignment for data ref.");
return false;
}
HOST_WIDE_INT stride;
bool slp_impossible = false;
- /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
+ /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
interleaving group (including gaps). */
- stride = dr_step / type_size;
+ stride = dr_step / type_size;
/* Not consecutive access is possible only if it is a part of interleaving. */
if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
{
/* Check if this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
-
+
/* Gaps are supported only for loads. STEP must be a multiple of the type
size. The size of the group must be a power of 2. */
if (DR_IS_READ (dr)
the type to get COUNT_IN_BYTES. */
count_in_bytes = type_size * count;
- /* Check that the size of the interleaving (including gaps) is not
+ /* Check that the size of the interleaving (including gaps) is not
greater than STEP. */
if (dr_step && dr_step < count_in_bytes + gaps * type_size)
{
{
slp_impossible = true;
/* There is a gap after the last load in the group. This gap is a
- difference between the stride and the number of elements. When
- there is no gap, this difference should be 0. */
- DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
+ difference between the stride and the number of elements. When
+ there is no gap, this difference should be 0. */
+ DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
}
else
{
return false;
}
- /* FORNOW: we handle only interleaving that is a power of 2.
+ /* FORNOW: we handle only interleaving that is a power of 2.
We don't fail here if it may be still possible to vectorize the
group using SLP. If not, the size of the group will be checked in
vect_analyze_operations, and the vectorization will fail. */
if (stride == 0)
stride = count;
-
+
DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
- /* SLP: create an SLP data structure for every interleaving group of
+ /* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyse_slp. */
if (!DR_IS_READ (dr) && !slp_impossible)
{
VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
stmt);
if (bb_vinfo)
- VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
+ VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
stmt);
}
}
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
-
+
if (loop_vinfo && !step)
{
if (vect_print_dump_info (REPORT_DETAILS))
/* Don't allow invariant accesses in loops. */
if (loop_vinfo && dr_step == 0)
- return false;
+ return false;
if (loop && nested_in_vect_loop_p (loop, stmt))
{
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
dr_step = TREE_INT_CST_LOW (step);
-
+
if (dr_step == 0)
{
if (vect_print_dump_info (REPORT_ALIGNMENT))
fprintf (vect_dump, "zero step in outer loop.");
if (DR_IS_READ (dr))
- return true;
+ return true;
else
return false;
}
break;
}
}
-
+
if (found)
{
VEC_ordered_remove (ddr_p, ddrs, i);
The general structure of the analysis of data refs in the vectorizer is as
follows:
- 1- vect_analyze_data_refs(loop/bb): call
+ 1- vect_analyze_data_refs(loop/bb): call
compute_data_dependences_for_loop/bb to find and analyze all data-refs
in the loop/bb and their dependences.
2- vect_analyze_dependences(): apply dependence testing using ddrs.
*/
bool
-vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
+vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
struct loop *loop = NULL;
basic_block bb = NULL;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
-
+
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
{
gimple stmt;
stmt_vec_info stmt_info;
- basic_block bb;
- tree base, offset, init;
-
+ tree base, offset, init;
+
if (!dr || !DR_REF (dr))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
base = unshare_expr (DR_BASE_ADDRESS (dr));
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
-
+
/* Update DR field in stmt_vec_info struct. */
- bb = gimple_bb (stmt);
/* If the dataref is in an inner-loop of the loop that is considered for
vectorization, we also want to analyze the access relative to
- the outer-loop (DR contains information only relative to the
+ the outer-loop (DR contains information only relative to the
inner-most enclosing loop). We do that by building a reference to the
first location accessed by the inner-loop, and analyze it relative to
- the outer-loop. */
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ the outer-loop. */
+ if (loop && nested_in_vect_loop_p (loop, stmt))
{
tree outer_step, outer_base, outer_init;
HOST_WIDE_INT pbitsize, pbitpos;
affine_iv base_iv, offset_iv;
tree dinit;
- /* Build a reference to the first location accessed by the
+ /* Build a reference to the first location accessed by the
inner-loop: *(BASE+INIT). (The first location is actually
BASE+INIT+OFFSET, but we add OFFSET separately later). */
tree inner_base = build_fold_indirect_ref
(fold_build2 (POINTER_PLUS_EXPR,
- TREE_TYPE (base), base,
+ TREE_TYPE (base), base,
fold_convert (sizetype, init)));
if (vect_print_dump_info (REPORT_DETAILS))
print_generic_expr (vect_dump, inner_base, TDF_SLIM);
}
- outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
+ outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
&poffset, &pmode, &punsignedp, &pvolatilep, false);
gcc_assert (outer_base != NULL_TREE);
}
outer_base = build_fold_addr_expr (outer_base);
- if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
+ if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
&base_iv, false))
{
if (vect_print_dump_info (REPORT_DETAILS))
if (offset)
{
if (poffset)
- poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
+ poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
poffset);
else
poffset = offset;
offset_iv.base = ssize_int (0);
offset_iv.step = ssize_int (0);
}
- else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
+ else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
&offset_iv, false))
{
if (vect_print_dump_info (REPORT_DETAILS))
STMT_VINFO_DR_STEP (stmt_info) = outer_step;
/* FIXME: Use canonicalize_base_object_address (base_iv.base); */
- STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
+ STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
STMT_VINFO_DR_INIT (stmt_info) = outer_init;
- STMT_VINFO_DR_OFFSET (stmt_info) =
+ STMT_VINFO_DR_OFFSET (stmt_info) =
fold_convert (ssizetype, offset_iv.base);
- STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
+ STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
size_int (highest_pow2_factor (offset_iv.base));
if (vect_print_dump_info (REPORT_DETAILS))
}
STMT_VINFO_DATA_REF (stmt_info) = dr;
-
+
/* Set vectype for STMT. */
scalar_type = TREE_TYPE (DR_REF (dr));
STMT_VINFO_VECTYPE (stmt_info) =
get_vectype_for_scalar_type (scalar_type);
- if (!STMT_VINFO_VECTYPE (stmt_info))
+ if (!STMT_VINFO_VECTYPE (stmt_info))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
return false;
}
}
-
+
return true;
}
/* Function vect_get_new_vect_var.
- Returns a name for a new variable. The current naming scheme appends the
- prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
- the name of vectorizer generated variables, and appends that to NAME if
+ Returns a name for a new variable. The current naming scheme appends the
+ prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
+ the name of vectorizer generated variables, and appends that to NAME if
provided. */
tree
if LOOP=j_loop: &in+i*2B (relative to j_loop)
Output:
- 1. Return an SSA_NAME whose value is the address of the memory location of
+ 1. Return an SSA_NAME whose value is the address of the memory location of
the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert
these statement(s) which define the returned SSA_NAME.
base_offset = ssize_int (0);
init = ssize_int (0);
base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
- }
+ }
data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
add_referenced_var (data_ref_base_var);
if (TREE_CODE (DR_REF (dr)) == INDIRECT_REF)
addr_base = unshare_expr (TREE_OPERAND (DR_REF (dr), 0));
else
- addr_base = build1 (ADDR_EXPR,
+ addr_base = build1 (ADDR_EXPR,
build_pointer_type (TREE_TYPE (DR_REF (dr))),
unshare_expr (DR_REF (dr)));
}
-
+
vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
vec_stmt = fold_convert (vect_ptr_type, addr_base);
/* Function vect_create_data_ref_ptr.
Create a new pointer to vector type (vp), that points to the first location
- accessed in the loop by STMT, along with the def-use update chain to
+ accessed in the loop by STMT, along with the def-use update chain to
appropriately advance the pointer through the loop iterations. Also set
aliasing information for the pointer. This vector pointer is used by the
callers to this function to create a memory reference expression for vector
Return the initial_address in INITIAL_ADDRESS.
2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
- update the pointer in each iteration of the loop.
+ update the pointer in each iteration of the loop.
Return the increment stmt that updates the pointer in PTR_INCR.
- 3. Set INV_P to true if the access pattern of the data reference in the
+ 3. Set INV_P to true if the access pattern of the data reference in the
vectorized loop is invariant. Set it to false otherwise.
4. Return the pointer. */
tree step;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
-
+
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
only_init = true;
*ptr_incr = NULL;
}
-
+
/* Check the step (evolution) of the load in LOOP, and record
whether it's invariant. */
if (nested_in_vect_loop)
step = STMT_VINFO_DR_STEP (stmt_info);
else
step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
-
+
if (tree_int_cst_compare (step, size_zero_node) == 0)
*inv_p = true;
else
*inv_p = false;
/* Create an expression for the first address accessed by this load
- in LOOP. */
+ in LOOP. */
base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
if (vect_print_dump_info (REPORT_DETAILS))
tree data_ref_base = base_name;
fprintf (vect_dump, "create vector-pointer variable to type: ");
print_generic_expr (vect_dump, vectype, TDF_SLIM);
- if (TREE_CODE (data_ref_base) == VAR_DECL
+ if (TREE_CODE (data_ref_base) == VAR_DECL
|| TREE_CODE (data_ref_base) == ARRAY_REF)
fprintf (vect_dump, " vectorizing an array ref: ");
else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
vect_ptr_type = build_pointer_type (vectype);
vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
get_name (base_name));
- /* If any of the data-references in the stmt group does not conflict
- with the created vector data-reference use a ref-all pointer instead. */
- if (STMT_VINFO_DR_GROUP_SIZE (stmt_info) > 1)
+
+ /* Vector types inherit the alias set of their component type by default so
+ we need to use a ref-all pointer if the data reference does not conflict
+ with the created vector data reference because it is not addressable. */
+ if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
+ get_alias_set (DR_REF (dr))))
+ {
+ vect_ptr_type
+ = build_pointer_type_for_mode (vectype,
+ TYPE_MODE (vect_ptr_type), true);
+ vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ get_name (base_name));
+ }
+
+ /* Likewise for any of the data references in the stmt group. */
+ else if (STMT_VINFO_DR_GROUP_SIZE (stmt_info) > 1)
{
gimple orig_stmt = STMT_VINFO_DR_GROUP_FIRST_DR (stmt_info);
do
if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
get_alias_set (lhs)))
{
- vect_ptr_type = build_pointer_type_for_mode (vectype,
- ptr_mode, true);
- vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
- get_name (base_name));
+ vect_ptr_type
+ = build_pointer_type_for_mode (vectype,
+ TYPE_MODE (vect_ptr_type), true);
+ vect_ptr
+ = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
+ get_name (base_name));
break;
}
def-use update cycles for the pointer: One relative to the outer-loop
(LOOP), which is what steps (3) and (4) below do. The other is relative
to the inner-loop (which is the inner-most loop containing the dataref),
- and this is done be step (5) below.
+ and this is done by step (5) below.
When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
inner-most loop, and so steps (3),(4) work the same, and step (5) is
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
- ...
+ ...
...
vp2 = vp1 + step
goto LOOP
-
+
If there is an inner-loop nested in loop, then step (5) will also be
applied, and an additional update in the inner-loop will be created:
**/
/* No update in loop is required. */
- if (only_init && (!loop_vinfo || at_loop == loop))
+ if (only_init && (!loop_vinfo || at_loop == loop))
{
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
{
/* The step of the vector pointer is the Vector Size. */
tree step = TYPE_SIZE_UNIT (vectype);
- /* One exception to the above is when the scalar step of the load in
+ /* One exception to the above is when the scalar step of the load in
LOOP is zero. In this case the step here is also zero. */
if (*inv_p)
step = size_zero_node;
{
standard_iv_increment_position (containing_loop, &incr_gsi,
&insert_after);
- create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
+ create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
containing_loop, &incr_gsi, insert_after, &indx_before_incr,
&indx_after_incr);
incr = gsi_stmt (incr_gsi);
if (ptr_incr)
*ptr_incr = incr;
- return indx_before_incr;
+ return indx_before_incr;
}
else
gcc_unreachable ();
/* Function bump_vector_ptr
Increment a pointer (to a vector type) by vector-size. If requested,
- i.e. if PTR-INCR is given, then also connect the new increment stmt
+ i.e. if PTR-INCR is given, then also connect the new increment stmt
to the existing def-use update-chain of the pointer, by modifying
the PTR_INCR as illustrated below:
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
....
- PTR_INCR: p_2 = DATAREF_PTR + step
+ PTR_INCR: p_2 = DATAREF_PTR + step
The pointer def-use update-chain after this function:
DATAREF_PTR = phi (p_0, p_2)
PTR_INCR: p_2 = NEW_DATAREF_PTR + step
Input:
- DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
+ DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
in the loop.
- PTR_INCR - optional. The stmt that updates the pointer in each iteration of
+ PTR_INCR - optional. The stmt that updates the pointer in each iteration of
the loop. The increment amount across iterations is expected
- to be vector_size.
+ to be vector_size.
BSI - location where the new update stmt is to be placed.
STMT - the original scalar memory-access stmt that is being vectorized.
BUMP - optional. The offset by which to bump the pointer. If not given,
the offset is assumed to be vector_size.
Output: Return NEW_DATAREF_PTR as illustrated above.
-
+
*/
tree
if (bump)
update = bump;
-
+
incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
dataref_ptr, update);
new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
int mode;
mode = (int) TYPE_MODE (vectype);
-
+
/* Check that the operation is supported. */
- interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
+ interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
vectype, optab_default);
- interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
+ interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
vectype, optab_default);
if (!interleave_high_optab || !interleave_low_optab)
{
return false;
}
- if (optab_handler (interleave_high_optab, mode)->insn_code
+ if (optab_handler (interleave_high_optab, mode)->insn_code
== CODE_FOR_nothing
- || optab_handler (interleave_low_optab, mode)->insn_code
+ || optab_handler (interleave_low_optab, mode)->insn_code
== CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
/* Function vect_permute_store_chain.
Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
- a power of 2, generate interleave_high/low stmts to reorder the data
+ a power of 2, generate interleave_high/low stmts to reorder the data
correctly for the stores. Return the final references for stores in
RESULT_CHAIN.
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
- 3rd vec: 16 17 18 19 20 21 22 23
+ 3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
i.e., we interleave the contents of the four vectors in their order.
- We use interleave_high/low instructions to create such output. The input of
+ We use interleave_high/low instructions to create such output. The input of
each interleave_high/low operation is two vectors:
- 1st vec 2nd vec
- 0 1 2 3 4 5 6 7
- the even elements of the result vector are obtained left-to-right from the
- high/low elements of the first vector. The odd elements of the result are
+ 1st vec 2nd vec
+ 0 1 2 3 4 5 6 7
+ the even elements of the result vector are obtained left-to-right from the
+ high/low elements of the first vector. The odd elements of the result are
obtained left-to-right from the high/low elements of the second vector.
The output of interleave_high will be: 0 4 1 5
and of interleave_low: 2 6 3 7
-
+
The permutation is done in log LENGTH stages. In each stage interleave_high
- and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
- where the first argument is taken from the first half of DR_CHAIN and the
- second argument from it's second half.
- In our example,
+ and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
+ where the first argument is taken from the first half of DR_CHAIN and the
+ second argument from its second half.
+ In our example,
I1: interleave_high (1st vec, 3rd vec)
I2: interleave_low (1st vec, 3rd vec)
I2: 2 10 18 26 3 11 19 27
I3: 4 12 20 28 5 13 21 29
I4: 6 14 22 30 7 15 23 31. */
-
+
bool
-vect_permute_store_chain (VEC(tree,heap) *dr_chain,
- unsigned int length,
+vect_permute_store_chain (VEC(tree,heap) *dr_chain,
+ unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
VEC(tree,heap) **result_chain)
tree perm_dest, vect1, vect2, high, low;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
- tree scalar_dest;
int i;
unsigned int j;
enum tree_code high_code, low_code;
-
- scalar_dest = gimple_assign_lhs (stmt);
/* Check that the operation is supported. */
if (!vect_strided_store_supported (vectype))
vect2 = VEC_index (tree, dr_chain, j+length/2);
/* Create interleaving stmt:
- in the case of big endian:
- high = interleave_high (vect1, vect2)
- and in the case of little endian:
+ in the case of big endian:
+ high = interleave_high (vect1, vect2)
+ and in the case of little endian:
high = interleave_low (vect1, vect2). */
perm_dest = create_tmp_var (vectype, "vect_inter_high");
DECL_GIMPLE_REG_P (perm_dest) = 1;
/* Create interleaving stmt:
in the case of big endian:
- low = interleave_low (vect1, vect2)
+ low = interleave_low (vect1, vect2)
and in the case of little endian:
- low = interleave_high (vect1, vect2). */
+ low = interleave_high (vect1, vect2). */
perm_dest = create_tmp_var (vectype, "vect_inter_low");
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
}
/* Function vect_setup_realignment
-
+
This function is called when vectorizing an unaligned load using
the dr_explicit_realign[_optimized] scheme.
This function generates the following code at the loop prolog:
p = initial_addr;
x msq_init = *(floor(p)); # prolog load
- realignment_token = call target_builtin;
+ realignment_token = call target_builtin;
loop:
x msq = phi (msq_init, ---)
- The stmts marked with x are generated only for the case of
+ The stmts marked with x are generated only for the case of
dr_explicit_realign_optimized.
- The code above sets up a new (vector) pointer, pointing to the first
+ The code above sets up a new (vector) pointer, pointing to the first
location accessed by STMT, and a "floor-aligned" load using that pointer.
It also generates code to compute the "realignment-token" (if the relevant
target hook was defined), and creates a phi-node at the loop-header bb
created by the caller to this function).
For the case of dr_explicit_realign_optimized:
- The caller to this function uses the phi-result (msq) to create the
+ The caller to this function uses the phi-result (msq) to create the
realignment code inside the loop, and sets up the missing phi argument,
as follows:
- loop:
+ loop:
msq = phi (msq_init, lsq)
lsq = *(floor(p')); # load in loop
result = realign_load (msq, lsq, realignment_token);
a memory location that may be unaligned.
BSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
- is used.
-
+ is used.
+
Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
target hook, if defined.
/* Function vect_permute_load_chain.
Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
- a power of 2, generate extract_even/odd stmts to reorder the input data
+ a power of 2, generate extract_even/odd stmts to reorder the input data
correctly. Return the final references for loads in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
- 3rd vec: 16 17 18 19 20 21 22 23
+ 3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
1st vec: 0 4 8 12 16 20 24 28
2nd vec: 1 5 9 13 17 21 25 29
- 3rd vec: 2 6 10 14 18 22 26 30
+ 3rd vec: 2 6 10 14 18 22 26 30
4th vec: 3 7 11 15 19 23 27 31
i.e., the first output vector should contain the first elements of each
We use extract_even/odd instructions to create such output. The input of each
extract_even/odd operation is two vectors
- 1st vec 2nd vec
- 0 1 2 3 4 5 6 7
+ 1st vec 2nd vec
+ 0 1 2 3 4 5 6 7
- and the output is the vector of extracted even/odd elements. The output of
+ and the output is the vector of extracted even/odd elements. The output of
extract_even will be: 0 2 4 6
and of extract_odd: 1 3 5 7
-
+
The permutation is done in log LENGTH stages. In each stage extract_even and
- extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
- order. In our example,
+ extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
+ order. In our example,
E1: extract_even (1st vec, 2nd vec)
E2: extract_odd (1st vec, 2nd vec)
E1: 0 2 4 6 8 10 12 14
E2: 1 3 5 7 9 11 13 15
- E3: 16 18 20 22 24 26 28 30
+ E3: 16 18 20 22 24 26 28 30
E4: 17 19 21 23 25 27 29 31
In order to proceed and create the correct sequence for the next stage (or
- for the correct output, if the second stage is the last one, as in our
- example), we first put the output of extract_even operation and then the
+ for the correct output, if the second stage is the last one, as in our
+ example), we first put the output of extract_even operation and then the
output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
The input for the second stage is:
1st vec (E1): 0 2 4 6 8 10 12 14
- 2nd vec (E3): 16 18 20 22 24 26 28 30
- 3rd vec (E2): 1 3 5 7 9 11 13 15
+ 2nd vec (E3): 16 18 20 22 24 26 28 30
+ 3rd vec (E2): 1 3 5 7 9 11 13 15
4th vec (E4): 17 19 21 23 25 27 29 31
The output of the second stage:
1st vec (E1): 0 4 8 12 16 20 24 28
2nd vec (E3): 1 5 9 13 17 21 25 29
- 3rd vec (E2): 2 6 10 14 18 22 26 30
+ 3rd vec (E2): 2 6 10 14 18 22 26 30
4th vec (E4): 3 7 11 15 19 23 27 31. */
bool
-vect_permute_load_chain (VEC(tree,heap) *dr_chain,
- unsigned int length,
+vect_permute_load_chain (VEC(tree,heap) *dr_chain,
+ unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
VEC(tree,heap) **result_chain)
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
mark_symbols_for_renaming (perm_stmt);
- VEC_replace (tree, *result_chain, j/2, data_ref);
-
+ VEC_replace (tree, *result_chain, j/2, data_ref);
+
/* data_ref = permute_odd (first_data_ref, second_data_ref); */
perm_dest = create_tmp_var (vectype, "vect_perm_odd");
DECL_GIMPLE_REG_P (perm_dest) = 1;
unsigned int i, gap_count;
tree tmp_data_ref;
- /* DR_CHAIN contains input data-refs that are a part of the interleaving.
- RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
+ /* DR_CHAIN contains input data-refs that are a part of the interleaving.
+ RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
vectors, that are ready for vector computation. */
result_chain = VEC_alloc (tree, heap, size);
/* Permute. */
if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
return false;
- /* Put a permuted data-ref in the VECTORIZED_STMT field.
- Since we scan the chain starting from it's first node, their order
+ /* Put a permuted data-ref in the VECTORIZED_STMT field.
+ Since we scan the chain starting from its first node, their order
corresponds to the order of data-refs in RESULT_CHAIN. */
next_stmt = first_stmt;
gap_count = 1;
access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
correspond to the gaps.
*/
- if (next_stmt != first_stmt
+ if (next_stmt != first_stmt
&& gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
{
gap_count++;
while (rel_stmt)
{
prev_stmt = rel_stmt;
- rel_stmt =
+ rel_stmt =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
}
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
new_stmt;
}
}
Returns whether the alignment of a DECL can be forced to be aligned
on ALIGNMENT bit boundary. */
-bool
+bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
if (TREE_CODE (decl) != VAR_DECL)
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
enum machine_mode mode = TYPE_MODE (vectype);
- bool invariant_in_outerloop = false;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *vect_loop = NULL;
bool nested_in_vect_loop = false;
vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
- if (nested_in_vect_loop)
- {
- tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
- invariant_in_outerloop =
- (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
- }
-
/* Possibly unaligned access. */
/* We can choose between using the implicit realignment scheme (generating
bool is_packed = false;
tree type = (TREE_TYPE (DR_REF (dr)));
- if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
+ if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
CODE_FOR_nothing
&& (!targetm.vectorize.builtin_mask_for_load
|| targetm.vectorize.builtin_mask_for_load ()))
if (!known_alignment_for_access_p (dr))
{
tree ba = DR_BASE_OBJECT (dr);
-
+
if (ba)
is_packed = contains_packed_reference (ba);
}
-
+
if (targetm.vectorize.
builtin_support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
if (!known_alignment_for_access_p (dr))
{
tree ba = DR_BASE_OBJECT (dr);
-
+
if (ba)
is_packed = contains_packed_reference (ba);
}
-
+
if (targetm.vectorize.
- builtin_support_vector_misalignment (mode, type,
+ builtin_support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
return dr_unaligned_supported;
}
-
+
/* Unsupported. */
return dr_unaligned_unsupported;
}