/* Transformation Utilities for Loop Vectorization.
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
slp_tree, slp_instance);
static tree vect_create_destination_var (tree, tree);
static tree vect_create_data_ref_ptr
- (gimple, struct loop*, tree, tree *, gimple *, bool, bool *);
+ (gimple, struct loop*, tree, tree *, gimple *, bool, bool *, tree);
static tree vect_create_addr_base_for_vector_ref
(gimple, gimple_seq *, tree, struct loop *);
static tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
int vec_outside_cost = 0;
int scalar_single_iter_cost = 0;
int scalar_outside_cost = 0;
- bool runtime_test = false;
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
return 0;
}
- /* If the number of iterations is unknown, or the
- peeling-for-misalignment amount is unknown, we will have to generate
- a runtime test to test the loop count against the threshold. */
- if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
- || (byte_misalign < 0))
- runtime_test = true;
-
/* Requires loop versioning tests to handle misalignment. */
-
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)))
{
/* FIXME: Make cost depend on complexity of individual check. */
"peeling for alignment is unknown .");
/* If peeled iterations are unknown, count a taken branch and a not taken
- branch per peeled loop. Even if scalar loop iterations are known,
- vector iterations are not known since peeled prologue iterations are
- not known. Hence guards remain the same. */
+ branch per peeled loop. Even if scalar loop iterations are known,
+ vector iterations are not known since peeled prologue iterations are
+ not known. Hence guards remain the same. */
peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
- + TARG_COND_NOT_TAKEN_BRANCH_COST);
-
+ + TARG_COND_NOT_TAKEN_BRANCH_COST);
}
else
{
conditions/branch directions. Change the estimates below to
something more reasonable. */
- if (runtime_test)
+ /* If the number of iterations is known and we do not do versioning, we can
+ decide whether to vectorize at compile time. Hence the scalar version
+ does not carry cost model guard costs. */
+ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ || VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
+ || VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)))
{
/* Cost model check occurs at versioning. */
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
else
{
- /* Cost model occurs at prologue generation. */
- if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
+ /* Cost model check occurs at prologue generation. */
+ if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
+ TARG_COND_NOT_TAKEN_BRANCH_COST;
/* Cost model check occurs at epilogue generation. */
gimple_seq_add_seq (new_stmt_list, seq);
/* Create base_offset */
- base_offset = size_binop (PLUS_EXPR, base_offset, init);
- base_offset = fold_convert (sizetype, base_offset);
- dest = create_tmp_var (TREE_TYPE (base_offset), "base_off");
+ base_offset = size_binop (PLUS_EXPR,
+ fold_convert (sizetype, base_offset),
+ fold_convert (sizetype, init));
+ dest = create_tmp_var (sizetype, "base_off");
add_referenced_var (dest);
base_offset = force_gimple_operand (base_offset, &seq, true, dest);
gimple_seq_add_seq (new_stmt_list, seq);
tree tmp = create_tmp_var (sizetype, "offset");
add_referenced_var (tmp);
- offset = fold_build2 (MULT_EXPR, TREE_TYPE (offset), offset, step);
- base_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (base_offset),
+ offset = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, offset), step);
+ base_offset = fold_build2 (PLUS_EXPR, sizetype,
base_offset, offset);
base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
gimple_seq_add_seq (new_stmt_list, seq);
by the data-ref in STMT.
4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
pointing to the initial address.
+ 5. TYPE: if not NULL indicates the required type of the data-ref.
Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
static tree
vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
tree offset, tree *initial_address, gimple *ptr_incr,
- bool only_init, bool *inv_p)
+ bool only_init, bool *inv_p, tree type)
{
tree base_name;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
}
/** (1) Create the new vector-pointer variable: **/
- vect_ptr_type = build_pointer_type (vectype);
+ if (type)
+ vect_ptr_type = build_pointer_type (type);
+ else
+ vect_ptr_type = build_pointer_type (vectype);
+ if (TREE_CODE (DR_BASE_ADDRESS (dr)) == SSA_NAME
+ && TYPE_RESTRICT (TREE_TYPE (DR_BASE_ADDRESS (dr))))
+ vect_ptr_type = build_qualified_type (vect_ptr_type, TYPE_QUAL_RESTRICT);
vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
get_name (base_name));
+ if (TREE_CODE (DR_BASE_ADDRESS (dr)) == SSA_NAME
+ && TYPE_RESTRICT (TREE_TYPE (DR_BASE_ADDRESS (dr))))
+ {
+ get_alias_set (base_name);
+ DECL_POINTER_ALIAS_SET (vect_ptr)
+ = DECL_POINTER_ALIAS_SET (SSA_NAME_VAR (DR_BASE_ADDRESS (dr)));
+ }
+
add_referenced_var (vect_ptr);
/** (2) Add aliasing information to the new vector-pointer:
create_iv (vect_ptr_init,
fold_convert (vect_ptr_type, step),
- NULL_TREE, loop, &incr_gsi, insert_after,
+ vect_ptr, loop, &incr_gsi, insert_after,
&indx_before_incr, &indx_after_incr);
incr = gsi_stmt (incr_gsi);
set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
{
standard_iv_increment_position (containing_loop, &incr_gsi,
&insert_after);
- create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), NULL_TREE,
+ create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
containing_loop, &incr_gsi, insert_after, &indx_before_incr,
&indx_after_incr);
incr = gsi_stmt (incr_gsi);
gimple stmt = VEC_index (gimple, stmts, 0);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
- int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ int nunits;
tree vec_cst;
tree t = NULL_TREE;
int j, number_of_places_left_in_vector;
int group_size = VEC_length (gimple, stmts);
unsigned int vec_num, i;
int number_of_copies = 1;
- bool is_store = false;
VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
- bool constant_p;
+ bool constant_p, is_store;
if (STMT_VINFO_DATA_REF (stmt_vinfo))
- is_store = true;
+ {
+ is_store = true;
+ op = gimple_assign_rhs1 (stmt);
+ }
+ else
+ {
+ is_store = false;
+ op = gimple_op (stmt, op_num + 1);
+ }
+
+ if (CONSTANT_CLASS_P (op))
+ {
+ vector_type = vectype;
+ constant_p = true;
+ }
+ else
+ {
+ vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
+ gcc_assert (vector_type);
+ constant_p = false;
+ }
+
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type);
/* NUMBER_OF_COPIES is the number of times we need to use the same values in
created vectors. It is greater than 1 if unrolling is performed.
number_of_copies = least_common_multiple (nunits, group_size) / group_size;
number_of_places_left_in_vector = nunits;
- constant_p = true;
for (j = 0; j < number_of_copies; j++)
{
for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--)
{
- if (is_store)
- op = gimple_assign_rhs1 (stmt);
- else
- op = gimple_op (stmt, op_num + 1);
- if (!CONSTANT_CLASS_P (op))
- constant_p = false;
-
+ if (is_store)
+ op = gimple_assign_rhs1 (stmt);
+ else
+ op = gimple_op (stmt, op_num + 1);
+
/* Create 'vect_ = {op0,op1,...,opn}'. */
t = tree_cons (NULL_TREE, op, t);
{
number_of_places_left_in_vector = nunits;
- vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
- gcc_assert (vector_type);
if (constant_p)
vec_cst = build_vector (vector_type, t);
else
vec_cst = build_constructor_from_list (vector_type, t);
- constant_p = true;
VEC_quick_push (tree, voprnds,
- vect_init_vector (stmt, vec_cst, vector_type,
- NULL));
+ vect_init_vector (stmt, vec_cst, vector_type, NULL));
t = NULL_TREE;
}
}
stmt_vec_info def_stmt_info = NULL;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
- int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
tree vec_inv;
tree vec_cst;
{
t = tree_cons (NULL_TREE, op, t);
}
- vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
- gcc_assert (vector_type);
- vec_cst = build_vector (vector_type, t);
-
- return vect_init_vector (stmt, vec_cst, vector_type, NULL);
+ vec_cst = build_vector (vectype, t);
+ return vect_init_vector (stmt, vec_cst, vectype, NULL);
}
/* Case 2: operand is defined outside the loop - loop invariant. */
case vect_invariant_def:
{
+ vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
+ gcc_assert (vector_type);
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+
if (scalar_def)
*scalar_def = def;
}
/* FIXME: use build_constructor directly. */
- vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
- gcc_assert (vector_type);
vec_inv = build_constructor_from_list (vector_type, t);
return vect_init_vector (stmt, vec_inv, vector_type, NULL);
}
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- gcc_assert (stmt == gsi_stmt (*gsi));
gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
}
- /* Make sure gsi points to the stmt that is being vectorized. */
- gcc_assert (stmt == gsi_stmt (*gsi));
-
- gimple_set_location (vec_stmt, gimple_location (stmt));
+ gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ tree scalar_type = TREE_TYPE (vectype);
enum tree_code code = gimple_assign_rhs_code (stmt);
tree type = TREE_TYPE (init_val);
tree vecdef;
tree init_def;
tree t = NULL_TREE;
int i;
- tree vector_type;
bool nested_in_vect_loop = false;
gcc_assert (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type));
else
*adjustment_def = init_val;
/* Create a vector of zeros for init_def. */
- if (SCALAR_FLOAT_TYPE_P (type))
- def_for_init = build_real (type, dconst0);
+ if (SCALAR_FLOAT_TYPE_P (scalar_type))
+ def_for_init = build_real (scalar_type, dconst0);
else
- def_for_init = build_int_cst (type, 0);
+ def_for_init = build_int_cst (scalar_type, 0);
+
for (i = nunits - 1; i >= 0; --i)
t = tree_cons (NULL_TREE, def_for_init, t);
- vector_type = get_vectype_for_scalar_type (TREE_TYPE (def_for_init));
- gcc_assert (vector_type);
- init_def = build_vector (vector_type, t);
+ init_def = build_vector (vectype, t);
break;
case MIN_EXPR:
return false;
}
- /* If accesses through a pointer to vectype do not alias the original
- memory reference we have a problem. */
- if (get_alias_set (vectype) != get_alias_set (TREE_TYPE (scalar_dest))
- && !alias_set_subset_of (get_alias_set (vectype),
- get_alias_set (TREE_TYPE (scalar_dest))))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "vector type does not alias scalar type");
- return false;
- }
-
- if (!useless_type_conversion_p (TREE_TYPE (op), TREE_TYPE (scalar_dest)))
+ /* The scalar rhs type needs to be trivially convertible to the vector
+ component type. This should always be the case. */
+ if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "operands of different types");
+ fprintf (vect_dump, "??? operands of different types");
return false;
}
Therefore, NEXT_STMT can't be NULL_TREE. In case that
there is no interleaving, GROUP_SIZE is 1, and only one
iteration of the loop will be executed. */
- gcc_assert (next_stmt);
- gcc_assert (gimple_assign_single_p (next_stmt));
+ gcc_assert (next_stmt
+ && gimple_assign_single_p (next_stmt));
op = gimple_assign_rhs1 (next_stmt);
vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
}
}
+ /* We should have caught mismatched types earlier. */
+ gcc_assert (useless_type_conversion_p (vectype,
+ TREE_TYPE (vec_oprnd)));
dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
&dummy, &ptr_incr, false,
- &inv_p);
+ &inv_p, NULL);
gcc_assert (!inv_p);
}
else
vec_oprnd = VEC_index (tree, result_chain, i);
data_ref = build_fold_indirect_ref (dataref_ptr);
+
/* Arguments are ready. Create the new vector stmt. */
new_stmt = gimple_build_assign (data_ref, vec_oprnd);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
pe = loop_preheader_edge (loop_for_initial_load);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
- &init_addr, &inc, true, &inv_p);
+ &init_addr, &inc, true, &inv_p, NULL_TREE);
data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
else
{
- gimple prev_stmt =
- STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
- gimple rel_stmt =
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
- while (rel_stmt)
- {
- prev_stmt = rel_stmt;
- rel_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
- }
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = new_stmt;
+ if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+ {
+ gimple prev_stmt =
+ STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
+ gimple rel_stmt =
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
+ while (rel_stmt)
+ {
+ prev_stmt = rel_stmt;
+ rel_stmt =
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
+ }
+
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
+ new_stmt;
+ }
}
+
next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
gap_count = 1;
/* If NEXT_STMT accesses the same DR as the previous statement,
for (i = mask_nunits - 1; i >= 0; --i)
t = tree_cons (NULL_TREE, build_int_cst (mask_element_type, mask_array[i]),
t);
-
mask_vec = build_vector (mask_type, t);
mask = vect_init_vector (stmt, mask_vec, mask_type, NULL);
return false;
}
- /* If accesses through a pointer to vectype do not alias the original
- memory reference we have a problem. */
- if (get_alias_set (vectype) != get_alias_set (scalar_type)
- && !alias_set_subset_of (get_alias_set (vectype),
- get_alias_set (scalar_type)))
- {
+ /* The vector component type needs to be trivially convertible to the
+ scalar lhs. This should always be the case. */
+ if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
+ {
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "vector type does not alias scalar type");
+ fprintf (vect_dump, "??? operands of different types");
return false;
}
dataref_ptr = vect_create_data_ref_ptr (first_stmt,
at_loop, offset,
&dummy, &ptr_incr, false,
- &inv_p);
+ &inv_p, NULL_TREE);
else
dataref_ptr =
bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
gimple orig_stmt_in_pattern;
bool done;
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
switch (STMT_VINFO_TYPE (stmt_info))
{
}
}
+ /* Handle inner-loop stmts whose DEF is used in the loop-nest that
+ is being vectorized, but outside the immediately enclosing loop. */
+ if (vec_stmt
+ && nested_in_vect_loop_p (loop, stmt)
+ && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
+ && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
+ || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer_by_reduction))
+ {
+ struct loop *innerloop = loop->inner;
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ tree scalar_dest;
+ gimple exit_phi;
+
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
+
+ /* Find the relevant loop-exit phi-node, and record the vec_stmt there
+ (to be used when vectorizing outer-loop stmts that use the DEF of
+ STMT). */
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ scalar_dest = PHI_RESULT (stmt);
+ else
+ scalar_dest = gimple_assign_lhs (stmt);
+
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
+ {
+ if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
+ {
+ exit_phi = USE_STMT (use_p);
+ STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
+ }
+ }
+ }
+
+ /* Handle stmts whose DEF is used outside the loop-nest that is
+ being vectorized. */
if (STMT_VINFO_LIVE_P (stmt_info)
&& STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
{
{
tree offset = DR_OFFSET (dr);
- niters = fold_build2 (MULT_EXPR, TREE_TYPE (niters), niters, DR_STEP (dr));
- offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, niters);
+ niters = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, niters),
+ fold_convert (sizetype, DR_STEP (dr)));
+ offset = fold_build2 (PLUS_EXPR, sizetype, offset, niters);
DR_OFFSET (dr) = offset;
}
static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
- unsigned int vectorization_factor)
+ unsigned int vectorization_factor)
{
gimple stmt;
bool strided_store, is_store;
stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
stmt_info = vinfo_for_stmt (stmt);
+
/* VECTYPE is the type of the destination. */
vectype = get_vectype_for_scalar_type (TREE_TYPE (gimple_assign_lhs (stmt)));
nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
- si = gsi_for_stmt (stmt);
+ /* Loads should be inserted before the first load. */
+ if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
+ && STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ && !REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
+ si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
+ else
+ si = gsi_for_stmt (stmt);
+
is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
if (is_store)
{
{
/* Schedule the tree of INSTANCE. */
is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
- instance,
- LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+ instance, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
if (vect_print_dump_info (REPORT_VECTORIZED_LOOPS)
|| vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS))