/* Loop autoparallelization.
- Copyright (C) 2006 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
Zdenek Dvorak <dvorakz@suse.cz>.
....
- # A new variable is created for each reduction:
- "reduction_initial" is the initial value given by the user.
- It is kept and will be used after the parallel computing is done. #
+ # Storing the initial value given by the user. #
- reduction_initial.24_46 = 1;
-
- # Storing the neutral value of the
- particular reduction's operation, e.g. 0 for PLUS_EXPR,
- 1 for MULT_EXPR, etc. into the reduction field.
- This is done in create_stores_for_reduction. #
+ .paral_data_store.32.sum.27 = 1;
- .paral_data_store.32.sum.27 = 0;
-
#pragma omp parallel num_threads(4)
#pragma omp for schedule(static)
- # sum.27_29 = PHI <sum.27_11, 0> # The neutral element replaces
- the user's inital value. #
+
+ # The neutral element corresponding to the particular
+ reduction's operation, e.g. 0 for PLUS_EXPR,
+ 1 for MULT_EXPR, etc., replaces the user's initial value. #
+
+ # sum.27_29 = PHI <sum.27_11, 0>
+
sum.27_11 = D.1827_8 + sum.27_29;
+
OMP_CONTINUE
# Adding this reduction phi is done at create_phi_for_local_result() #
# collecting the result after the join of the threads is done at
create_loads_for_reductions().
- a new variable "reduction_final" is created. It calculates the final
- value from the initial value and the value computed by the threads #
+ The value computed by the threads is loaded from the
+ shared struct. #
+
.paral_data_load.33_52 = &.paral_data_store.32;
- reduction_final.34_53 = .paral_data_load.33_52->sum.27;
- sum_37 = reduction_initial.24_46 + reduction_final.34_53;
+ sum_37 = .paral_data_load.33_52->sum.27;
sum_43 = D.1795_41 + sum_37;
exit bb:
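
For reference, the gimple in the example above corresponds to a simple
accumulation loop of roughly the following shape (an illustrative sketch;
the array x, the bound N and the initial value 1 are assumptions matching
the statements shown, not quoted from a testcase):

  int sum = 1;
  int i;

  for (i = 0; i < N; i++)
    sum += x[i];

The user's initial value 1 is what ends up in .paral_data_store.32.sum.27
before the parallel region, while each thread's partial sum starts from the
neutral element 0 of PLUS_EXPR.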
enum tree_code reduction_code; /* code for the reduction operation. */
tree keep_res; /* The PHI_RESULT of this phi is the resulting value
of the reduction variable when exiting the loop. */
- tree initial_value; /* An ssa name representing a new variable holding
- the initial value of the reduction var before entering the loop. */
+ tree initial_value; /* The initial value of the reduction var before entering the loop. */
tree field; /* the name of the field in the parloop data structure intended for reduction. */
tree init; /* reduction initialization value. */
tree new_phi; /* (helper field) Newly created phi node whose result
return ret;
}
-/* Assigns the address of VAR in TYPE to an ssa name, and returns this name.
+/* Return true when LOOP contains basic blocks marked with the
+ BB_IRREDUCIBLE_LOOP flag. */
+
+static inline bool
+loop_has_blocks_with_irreducible_flag (struct loop *loop)
+{
+ unsigned i;
+ basic_block *bbs = get_loop_body_in_dom_order (loop);
+ bool res = true;
+
+ for (i = 0; i < loop->num_nodes; i++)
+ if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP)
+ goto end;
+
+ res = false;
+ end:
+ free (bbs);
+ return res;
+}
+
+/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
The assignment statement is placed before LOOP. DECL_ADDRESS maps decls
- to their addresses that can be reused. */
+ to their addresses that can be reused. The address of OBJ is known to
+ be invariant in the whole function. */
static tree
-take_address_of (tree var, tree type, struct loop *loop, htab_t decl_address)
+take_address_of (tree obj, tree type, struct loop *loop, htab_t decl_address)
{
- int uid = DECL_UID (var);
+ int uid;
void **dslot;
struct int_tree_map ielt, *nielt;
- tree name, bvar, stmt;
+ tree *var_p, name, bvar, stmt, addr;
edge entry = loop_preheader_edge (loop);
+ /* Since the address of OBJ is invariant, the trees may be shared.
+ Avoid rewriting unrelated parts of the code. */
+ obj = unshare_expr (obj);
+ for (var_p = &obj;
+ handled_component_p (*var_p);
+ var_p = &TREE_OPERAND (*var_p, 0))
+ continue;
+ uid = DECL_UID (*var_p);
+
ielt.uid = uid;
dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
if (!*dslot)
{
- bvar = create_tmp_var (type, get_name (var));
+ addr = build_addr (*var_p, current_function_decl);
+ bvar = create_tmp_var (TREE_TYPE (addr), get_name (*var_p));
add_referenced_var (bvar);
- stmt = build_gimple_modify_stmt (bvar,
- fold_convert (type,
- build_addr (var,
- current_function_decl)));
+ stmt = build_gimple_modify_stmt (bvar, addr);
name = make_ssa_name (bvar, stmt);
GIMPLE_STMT_OPERAND (stmt, 0) = name;
bsi_insert_on_edge_immediate (entry, stmt);
nielt->uid = uid;
nielt->to = name;
*dslot = nielt;
-
- return name;
}
+ else
+ name = ((struct int_tree_map *) *dslot)->to;
- name = ((struct int_tree_map *) *dslot)->to;
- if (TREE_TYPE (name) == type)
- return name;
+ if (var_p != &obj)
+ {
+ *var_p = build1 (INDIRECT_REF, TREE_TYPE (*var_p), name);
+ name = force_gimple_operand (build_addr (obj, current_function_decl),
+ &stmt, true, NULL_TREE);
+ if (stmt)
+ bsi_insert_on_edge_immediate (entry, stmt);
+ }
- bvar = SSA_NAME_VAR (name);
- stmt = build_gimple_modify_stmt (bvar, fold_convert (type, name));
- name = make_ssa_name (bvar, stmt);
- GIMPLE_STMT_OPERAND (stmt, 0) = name;
- bsi_insert_on_edge_immediate (entry, stmt);
+ if (TREE_TYPE (name) != type)
+ {
+ name = force_gimple_operand (fold_convert (type, name), &stmt, true,
+ NULL_TREE);
+ if (stmt)
+ bsi_insert_on_edge_immediate (entry, stmt);
+ }
return name;
}
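
A conceptual sketch of what the rewritten take_address_of emits for a
component reference (the struct variable s, its field f and the ssa names
below are assumed, not pass output): for OBJ = s.f, the cached base address
is taken first, OBJ is rebuilt as a reference through that address, and the
resulting address is gimplified and converted to TYPE when necessary:

  /* OBJ = s.f                                      (assumed example)

       s.addr_1 = &s;                <- cached in DECL_ADDRESS, reusable
       addr_2 = &(*s.addr_1).f;      <- address of the rewritten OBJ
       name_3 = (TYPE) addr_2;       <- only when a conversion is needed

     All of these statements are inserted on the loop preheader edge,
     and the last name is what gets returned.  */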
static int
initialize_reductions (void **slot, void *data)
{
- tree stmt;
tree init, c;
- tree name1;
tree bvar, type, arg;
edge e;
e = loop_preheader_edge (loop);
arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
- /* Create new variable to hold the initial value. */
- type = TREE_TYPE (bvar);
- bvar = create_tmp_var (type, "reduction_initial");
- add_referenced_var (bvar);
-
- stmt = build_gimple_modify_stmt (bvar, arg);
- name1 = make_ssa_name (bvar, stmt);
- GIMPLE_STMT_OPERAND (stmt, 0) = name1;
- SSA_NAME_DEF_STMT (name1) = stmt;
- bsi_insert_on_edge_immediate (e, stmt);
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
(reduc->reduc_phi, loop_preheader_edge (loop)), init);
- reduc->initial_value = name1;
+ reduc->initial_value = arg;
return 1;
}
walk_tree. */
static tree
-eliminate_local_variables_1 (tree * tp, int *walk_subtrees, void *data)
+eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
struct elv_data *dta = data;
- tree t = *tp, var, addr, addr_type, type;
+ tree t = *tp, var, addr, addr_type, type, obj;
if (DECL_P (t))
{
if (TREE_CODE (t) == ADDR_EXPR)
{
- var = TREE_OPERAND (t, 0);
- if (!DECL_P (var))
+ /* ADDR_EXPR may appear in two contexts:
+ -- as a gimple operand, when the address taken is a function invariant
+ -- as a gimple rhs, when the resulting address is not a function
+ invariant
+ We do not need to do anything special in the latter case (the base of
+ the memory reference whose address is taken may be replaced in the
+ DECL_P case). The former case is more complicated, as we need to
+ ensure that the new address is still a gimple operand. Thus, it
+ is not sufficient to replace just the base of the memory reference --
+ we need to move the whole computation of the address out of the
+ loop. */
+ if (!is_gimple_val (t))
return NULL_TREE;
*walk_subtrees = 0;
- if (!SSA_VAR_P (var) || DECL_EXTERNAL (var))
+ obj = TREE_OPERAND (t, 0);
+ var = get_base_address (obj);
+ if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
return NULL_TREE;
addr_type = TREE_TYPE (t);
- addr = take_address_of (var, addr_type, dta->loop, dta->decl_address);
+ addr = take_address_of (obj, addr_type, dta->loop, dta->decl_address);
*tp = addr;
dta->changed = true;
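
To make the two ADDR_EXPR contexts described in the comment above concrete
(a hypothetical fragment; x, a, i_1 and p_2 are assumed names):

  /* foo (&x);        <- &x is a function invariant used as a gimple operand;
                         the whole address is precomputed before the loop
     p_2 = &a[i_1];    <- the address is not invariant and forms a full rhs;
                         only the base "a" may be replaced, via the DECL_P
                         case above  */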
if (!*dslot)
{
var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
+ DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
add_referenced_var (var_copy);
nielt = XNEW (struct int_tree_map);
nielt->uid = uid;
htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
}
-/* Callback for htab_traverse. Create a new variable that loads the
- final reduction value at the
- join point of all threads, adds the initial value the reduction
- variable had before the parallel computation started, and
- inserts it in the right place. */
+/* Callback for htab_traverse. Loads the final reduction value at the
+ join point of all threads, and inserts it in the right place. */
static int
create_loads_for_reductions (void **slot, void *data)
tree type = TREE_TYPE (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
tree load_struct;
- tree bvar, name;
+ tree name;
tree x;
bsi = bsi_after_labels (clsn_data->load_bb);
load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
NULL_TREE);
- bvar = create_tmp_var (type, "reduction_final");
- add_referenced_var (bvar);
-
- /* Apply operation between the new variable which is the result
- of computation all threads, and the initial value which is kept
- at reduction->inital_value. */
- stmt = build_gimple_modify_stmt (bvar, load_struct);
- name = make_ssa_name (bvar, stmt);
- GIMPLE_STMT_OPERAND (stmt, 0) = name;
- SSA_NAME_DEF_STMT (name) = stmt;
- bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
- x =
- fold_build2 (red->reduction_code, TREE_TYPE (load_struct),
- name, red->initial_value);
+ x = load_struct;
name = PHI_RESULT (red->keep_res);
stmt = build_gimple_modify_stmt (name, x);
GIMPLE_STMT_OPERAND (stmt, 0) = name;
build_gimple_modify_stmt (build3
(COMPONENT_REF, type, clsn_data->store,
red->field, NULL_TREE),
- red->init );
+ red->initial_value);
mark_virtual_ops_for_renaming (stmt);
bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);
htab_traverse (name_copies, create_loads_and_stores_for_name,
ld_st_data);
- /* Load the calculation from memory into a new
- reduction variable (after the join of the threads). */
+ /* Load the calculation from memory (after the join of the threads). */
+
if (htab_elements (reduction_list) > 0)
{
htab_traverse (reduction_list, create_stores_for_reduction,
TREE_USED (t) = 1;
DECL_ARGUMENTS (decl) = t;
- allocate_struct_function (decl);
+ allocate_struct_function (decl, false);
/* The call to allocate_struct_function clobbers CFUN, so we need to restore
it. */
- cfun = act_cfun;
+ set_cfun (act_cfun);
return decl;
}
canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree nit)
{
unsigned precision = TYPE_PRECISION (TREE_TYPE (nit));
- tree phi, prev, res, type, var_before, val, atype, t, next;
+ tree phi, prev, res, type, var_before, val, atype, mtype, t, next;
block_stmt_iterator bsi;
bool ok;
affine_iv iv;
remove_phi_node (phi, prev, false);
atype = TREE_TYPE (res);
- val = fold_build2 (PLUS_EXPR, atype,
- unshare_expr (iv.base),
- fold_build2 (MULT_EXPR, atype,
- unshare_expr (iv.step),
- fold_convert (atype, var_before)));
+ mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
+ val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
+ fold_convert (mtype, var_before));
+ val = fold_build2 (POINTER_TYPE_P (atype)
+ ? POINTER_PLUS_EXPR : PLUS_EXPR,
+ atype, unshare_expr (iv.base), val);
val = force_gimple_operand_bsi (&bsi, val, false, NULL_TREE, true,
BSI_SAME_STMT);
t = build_gimple_modify_stmt (res, val);
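
Spelling out the rewrite just above: for an induction variable of pointer
type, the offset is now computed in sizetype and combined with the base via
POINTER_PLUS_EXPR rather than a PLUS_EXPR on the pointer type (a conceptual
sketch, not actual pass output):

  /* integral atype:  val = iv.base +  iv.step * var_before   (PLUS_EXPR, in atype)
     pointer  atype:  val = iv.base p+ iv.step * var_before   (POINTER_PLUS_EXPR,
                                                                offset in sizetype)  */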
/* Initialize the control variable to NIT. */
bsi = bsi_after_labels (ex_bb);
+ nit = force_gimple_operand_bsi (&bsi,
+ fold_convert (TREE_TYPE (control_name), nit),
+ false, NULL_TREE, false, BSI_SAME_STMT);
t = build_gimple_modify_stmt (control_name, nit);
bsi_insert_before (&bsi, t, BSI_NEW_STMT);
SSA_NAME_DEF_STMT (control_name) = t;
unsigned n_threads, struct tree_niter_desc *niter)
{
struct loop *nloop;
+ loop_iterator li;
tree many_iterations_cond, type, nit;
tree stmts, arg_struct, new_arg_struct;
basic_block parallel_head;
expander to do it). */
cancel_loop_tree (loop);
+ /* Free loop bound estimations that could contain references to
+ removed statements. */
+ FOR_EACH_LOOP (li, loop, 0)
+ free_numbers_of_iterations_estimates_loop (loop);
+
/* Expand the parallel constructs. We do it directly here instead of running
a separate expand_omp pass, since it is more efficient, and less likely to
cause troubles with further analyses not being able to deal with the
|| expected_loop_iterations (loop) <= n_threads
/* And of course, the loop must be parallelizable. */
|| !can_duplicate_loop_p (loop)
+ || loop_has_blocks_with_irreducible_flag (loop)
|| !loop_parallel_p (loop, reduction_list, &niter_desc))
continue;