marshalling to implement data sharing and copying clauses.
Contributed by Diego Novillo <dnovillo@redhat.com>
- Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of GCC.
static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
+static tree lookup_decl_in_outer_ctx (tree, omp_context *);
+static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES. */
fd->pre = NULL;
t = OMP_FOR_INIT (for_stmt);
- gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
- fd->v = TREE_OPERAND (t, 0);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ fd->v = GIMPLE_STMT_OPERAND (t, 0);
gcc_assert (DECL_P (fd->v));
gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
- fd->n1 = TREE_OPERAND (t, 1);
+ fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
t = OMP_FOR_COND (for_stmt);
fd->cond_code = TREE_CODE (t);
}
t = OMP_FOR_INCR (fd->for_stmt);
- gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
- t = TREE_OPERAND (t, 1);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == fd->v);
+ t = GIMPLE_STMT_OPERAND (t, 1);
gcc_assert (TREE_OPERAND (t, 0) == fd->v);
switch (TREE_CODE (t))
{
basic_block par_entry_bb, par_exit_bb;
basic_block ws_entry_bb, ws_exit_bb;
- if (region == NULL || region->inner == NULL)
+ if (region == NULL || region->inner == NULL
+ || region->exit == NULL || region->inner->exit == NULL)
return;
/* We only support parallel+for and parallel+sections. */
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
- /* We can only use copy-in/copy-out semantics for shared varibles
+ /* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_p)
{
without analyzing the expression whether or not its location
is accessible to anyone else. In the case of nested parallel
regions it certainly may be. */
- if (DECL_HAS_VALUE_EXPR_P (decl))
+ if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
return true;
/* Do not use copy-in/copy-out for variables that have their
tree copy = build_decl (VAR_DECL, name, type);
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
- DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var);
+ DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
TREE_USED (copy) = 1;
{
tree x;
- if (is_global_var (var))
+ if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
else if (is_variable_sized (var))
{
}
else if (ctx->outer)
x = lookup_decl (var, ctx->outer);
+ else if (is_reference (var))
+ /* This can happen with orphaned constructs. If var is reference, it is
+ possible it is shared and as such valid. */
+ x = var;
else
gcc_unreachable ();
omp_context *ctx = (omp_context *) cb;
tree new_var;
- if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
- return var;
-
if (TREE_CODE (var) == LABEL_DECL)
{
new_var = create_artificial_label ();
return new_var;
}
+ if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
+ return var;
+
return error_mark_node;
}
}
if (region->exit)
- fprintf (file, "%*sbb: %d: OMP_RETURN\n", indent, "",
+ fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
region->exit->index);
else
fprintf (file, "%*s[no exit marker]\n", indent, "");
decl = OMP_CLAUSE_DECL (c);
gcc_assert (!is_variable_sized (decl));
by_ref = use_pointer_for_field (decl, true);
+ /* Global variables don't need to be copied,
+ the receiver side will use them directly. */
+ if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
+ break;
if (! TREE_READONLY (decl)
|| TREE_ADDRESSABLE (decl)
|| by_ref
do_private:
if (is_variable_sized (decl))
break;
- else if (is_parallel_ctx (ctx))
+ else if (is_parallel_ctx (ctx)
+ && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
+ ctx)))
{
by_ref = use_pointer_for_field (decl, false);
install_var_field (decl, by_ref, ctx);
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
- fixup_remapped_decl (decl, ctx, false);
+ if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
+ fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_COPYPRIVATE:
}
+/* Check OpenMP nesting restrictions for directive T.  CTX is the
+   innermost enclosing OpenMP context; we walk outward via ctx->outer.
+   Violations are diagnosed with warnings only — compilation proceeds
+   regardless.  */
+static void
+check_omp_nesting_restrictions (tree t, omp_context *ctx)
+{
+ switch (TREE_CODE (t))
+ {
+ case OMP_FOR:
+ case OMP_SECTIONS:
+ case OMP_SINGLE:
+ /* Work-sharing constructs: scan enclosing contexts until we reach
+ the innermost parallel region; OMP_PARALLEL stops the walk since
+ only *closely* nested regions are restricted. */
+ for (; ctx != NULL; ctx = ctx->outer)
+ switch (TREE_CODE (ctx->stmt))
+ {
+ case OMP_FOR:
+ case OMP_SECTIONS:
+ case OMP_SINGLE:
+ case OMP_ORDERED:
+ case OMP_MASTER:
+ warning (0, "work-sharing region may not be closely nested inside "
+ "of work-sharing, critical, ordered or master region");
+ return;
+ case OMP_PARALLEL:
+ return;
+ default:
+ break;
+ }
+ break;
+ case OMP_MASTER:
+ /* A master region may not be closely nested inside a work-sharing
+ region within the same parallel region. */
+ for (; ctx != NULL; ctx = ctx->outer)
+ switch (TREE_CODE (ctx->stmt))
+ {
+ case OMP_FOR:
+ case OMP_SECTIONS:
+ case OMP_SINGLE:
+ warning (0, "master region may not be closely nested inside "
+ "of work-sharing region");
+ return;
+ case OMP_PARALLEL:
+ return;
+ default:
+ break;
+ }
+ break;
+ case OMP_ORDERED:
+ /* ordered must not sit inside a critical region, and its closest
+ enclosing loop must carry an ordered clause. */
+ for (; ctx != NULL; ctx = ctx->outer)
+ switch (TREE_CODE (ctx->stmt))
+ {
+ case OMP_CRITICAL:
+ warning (0, "ordered region may not be closely nested inside "
+ "of critical region");
+ return;
+ case OMP_FOR:
+ if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
+ OMP_CLAUSE_ORDERED) == NULL)
+ warning (0, "ordered region must be closely nested inside "
+ "a loop region with an ordered clause");
+ return;
+ case OMP_PARALLEL:
+ return;
+ default:
+ break;
+ }
+ break;
+ case OMP_CRITICAL:
+ /* Critical regions with the same name may not nest, at any depth
+ (note: this walk does not stop at OMP_PARALLEL). */
+ for (; ctx != NULL; ctx = ctx->outer)
+ if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
+ && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
+ {
+ warning (0, "critical region may not be nested inside a critical "
+ "region with the same name");
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+
/* Callback for walk_stmts used to scan for OpenMP directives at TP. */
static tree
if (EXPR_HAS_LOCATION (t))
input_location = EXPR_LOCATION (t);
+ /* Check the OpenMP nesting restrictions. */
+ if (OMP_DIRECTIVE_P (t) && ctx != NULL)
+ check_omp_nesting_restrictions (t, ctx);
+
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case VAR_DECL:
case PARM_DECL:
case LABEL_DECL:
+ case RESULT_DECL:
if (ctx)
*tp = remap_decl (t, &ctx->cb);
break;
static void
build_omp_barrier (tree *stmt_list)
{
- tree t;
-
- t = built_in_decls[BUILT_IN_GOMP_BARRIER];
- t = build_function_call_expr (t, NULL);
+ tree t = build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
gimplify_and_add (t, stmt_list);
}
}
+/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
+ in outer contexts.  Outer contexts are only searched when CTX is a
+ nested context (ctx->is_nested); otherwise DECL itself is returned
+ unchanged.  Never returns NULL. */
+
+static tree
+maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
+{
+ tree t = NULL;
+ omp_context *up;
+
+ if (ctx->is_nested)
+ /* Walk outward, stopping at the first context that remaps DECL. */
+ for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
+ t = maybe_lookup_decl (decl, up);
+
+ return t ? t : decl;
+}
+
+
/* Construct the initialization value for reduction CLAUSE. */
tree
static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
- omp_context *ctx)
+ omp_context *ctx)
{
tree_stmt_iterator diter;
- tree c, dtor, copyin_seq, x, args, ptr;
+ tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
+ bool lastprivate_firstprivate = false;
int pass;
*dlist = alloc_stmt_list ();
continue;
break;
case OMP_CLAUSE_SHARED:
+ if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
+ {
+ gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
+ continue;
+ }
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_REDUCTION:
break;
+ case OMP_CLAUSE_LASTPRIVATE:
+ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
+ {
+ lastprivate_firstprivate = true;
+ if (pass != 0)
+ continue;
+ }
+ break;
default:
continue;
}
gcc_assert (DECL_P (ptr));
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
- args = tree_cons (NULL, x, NULL);
- x = built_in_decls[BUILT_IN_ALLOCA];
- x = build_function_call_expr (x, args);
+ x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
x = fold_convert (TREE_TYPE (ptr), x);
- x = build2 (MODIFY_EXPR, void_type_node, ptr, x);
+ x = build_gimple_modify_stmt (ptr, x);
gimplify_and_add (x, ilist);
}
else if (is_reference (var))
code that expects a pointer to something that expects
a direct variable. Note that this doesn't apply to
C++, since reference types are disallowed in data
- sharing clauses there. */
+ sharing clauses there, except for NRV optimized
+ return values. */
if (pass == 0)
continue;
if (DECL_NAME (var))
name = IDENTIFIER_POINTER (DECL_NAME (new_var));
- x = create_tmp_var (TREE_TYPE (TREE_TYPE (new_var)), name);
+ x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
+ name);
+ gimple_add_tmp_var (x);
x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
}
else
{
- args = tree_cons (NULL, x, NULL);
- x = built_in_decls[BUILT_IN_ALLOCA];
- x = build_function_call_expr (x, args);
+ x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
x = fold_convert (TREE_TYPE (new_var), x);
}
- x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
+ x = build_gimple_modify_stmt (new_var, x);
gimplify_and_add (x, ilist);
new_var = build_fold_indirect_ref (new_var);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
+ /* Shared global vars are just accessed directly. */
+ if (is_global_var (new_var))
+ break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
- x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
+ x = build_gimple_modify_stmt (new_var, x);
gimplify_and_add (x, ilist);
}
break;
but it certainly is to C++ operator=. */
if (copyin_seq)
{
- x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
- threads. */
- if (copyin_by_ref)
+ threads. Similarly for variables in both firstprivate and
+ lastprivate clauses we need to ensure the lastprivate copying
+ happens after firstprivate copying in all threads. */
+ if (copyin_by_ref || lastprivate_firstprivate)
build_omp_barrier (ilist);
}
{
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
- x = build2 (MODIFY_EXPR, void_type_node, ref, x);
+ x = build_gimple_modify_stmt (ref, x);
append_to_statement_list (x, &sub_list);
}
}
- x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
gimplify_and_add (x, stmt_list);
gimplify_and_add (sub_list, stmt_list);
- x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
gimplify_and_add (x, stmt_list);
}
ref = build_sender_ref (var, ctx);
x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
x = by_ref ? build_fold_addr_expr (x) : x;
- x = build2 (MODIFY_EXPR, void_type_node, ref, x);
+ x = build_gimple_modify_stmt (ref, x);
gimplify_and_add (x, slist);
ref = build_receiver_ref (var, by_ref, ctx);
if (ctx->is_nested)
var = lookup_decl_in_outer_ctx (val, ctx);
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
+ && is_global_var (var))
+ continue;
if (is_variable_sized (val))
continue;
by_ref = use_pointer_for_field (val, false);
{
ref = build_sender_ref (val, ctx);
x = by_ref ? build_fold_addr_expr (var) : var;
- x = build2 (MODIFY_EXPR, void_type_node, ref, x);
+ x = build_gimple_modify_stmt (ref, x);
gimplify_and_add (x, ilist);
}
if (do_out)
{
ref = build_sender_ref (val, ctx);
- x = build2 (MODIFY_EXPR, void_type_node, var, ref);
+ x = build_gimple_modify_stmt (var, ref);
gimplify_and_add (x, olist);
}
}
{
x = build_sender_ref (ovar, ctx);
var = build_fold_addr_expr (var);
- x = build2 (MODIFY_EXPR, void_type_node, x, var);
+ x = build_gimple_modify_stmt (x, var);
gimplify_and_add (x, ilist);
}
else
{
x = build_sender_ref (ovar, ctx);
- x = build2 (MODIFY_EXPR, void_type_node, x, var);
+ x = build_gimple_modify_stmt (x, var);
gimplify_and_add (x, ilist);
x = build_sender_ref (ovar, ctx);
- x = build2 (MODIFY_EXPR, void_type_node, var, x);
+ x = build_gimple_modify_stmt (var, x);
gimplify_and_add (x, olist);
}
}
expand_parallel_call (struct omp_region *region, basic_block bb,
tree entry_stmt, tree ws_args)
{
- tree t, args, val, cond, c, list, clauses;
+ tree t, t1, t2, val, cond, c, list, clauses;
block_stmt_iterator si;
int start_ix;
si = bsi_start (then_bb);
t = build1 (LABEL_EXPR, void_type_node, then_lab);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
- t = build2 (MODIFY_EXPR, void_type_node, tmp, val);
+ t = build_gimple_modify_stmt (tmp, val);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (else_bb);
t = build1 (LABEL_EXPR, void_type_node, else_lab);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
- t = build2 (MODIFY_EXPR, void_type_node, tmp,
- build_int_cst (unsigned_type_node, 1));
+ t = build_gimple_modify_stmt (tmp,
+ build_int_cst (unsigned_type_node, 1));
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
}
list = NULL_TREE;
- args = tree_cons (NULL, val, NULL);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
- t = null_pointer_node;
+ t1 = null_pointer_node;
else
- t = build_fold_addr_expr (t);
- args = tree_cons (NULL, t, args);
- t = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
- args = tree_cons (NULL, t, args);
+ t1 = build_fold_addr_expr (t);
+ t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
if (ws_args)
- args = chainon (args, ws_args);
+ {
+ tree args = tree_cons (NULL, t2,
+ tree_cons (NULL, t1,
+ tree_cons (NULL, val, ws_args)));
+ t = build_function_call_expr (built_in_decls[start_ix], args);
+ }
+ else
+ t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
- t = built_in_decls[start_ix];
- t = build_function_call_expr (t, args);
gimplify_and_add (t, &list);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
t = null_pointer_node;
else
t = build_fold_addr_expr (t);
- args = tree_cons (NULL, t, NULL);
- t = build_function_call_expr (OMP_PARALLEL_FN (entry_stmt), args);
+ t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
gimplify_and_add (t, &list);
- t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END];
- t = build_function_call_expr (t, NULL);
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
gimplify_and_add (t, &list);
si = bsi_last (bb);
if (lang_protect_cleanup_actions)
t = lang_protect_cleanup_actions ();
else
- {
- t = built_in_decls[BUILT_IN_TRAP];
- t = build_function_call_expr (t, NULL);
- }
+ t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
EH_FILTER_MUST_NOT_THROW (f) = 1;
gimplify_and_add (t, &EH_FILTER_FAILURE (f));
block_stmt_iterator si;
entry_succ_e = single_succ_edge (entry_bb);
- exit_succ_e = single_succ_edge (exit_bb);
si = bsi_last (entry_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
new_bb = entry_bb;
remove_edge (entry_succ_e);
- make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
+ if (exit_bb)
+ {
+ exit_succ_e = single_succ_edge (exit_bb);
+ make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
+ }
}
else
{
/* If the parallel region needs data sent from the parent
- function, then the very first statement of the parallel body
+ function, then the very first statement (except possible
+ tree profile counter updates) of the parallel body
is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
&.OMP_DATA_O is passed as an argument to the child function,
we need to replace it with the argument as seen by the child
if (OMP_PARALLEL_DATA_ARG (entry_stmt))
{
basic_block entry_succ_bb = single_succ (entry_bb);
- block_stmt_iterator si = bsi_start (entry_succ_bb);
- tree stmt;
+ block_stmt_iterator si;
- gcc_assert (!bsi_end_p (si));
+ for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
+ {
+ tree stmt, arg;
- stmt = bsi_stmt (si);
- gcc_assert (TREE_CODE (stmt) == MODIFY_EXPR
- && TREE_CODE (TREE_OPERAND (stmt, 1)) == ADDR_EXPR
- && TREE_OPERAND (TREE_OPERAND (stmt, 1), 0)
- == OMP_PARALLEL_DATA_ARG (entry_stmt));
+ gcc_assert (!bsi_end_p (si));
+ stmt = bsi_stmt (si);
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+ continue;
- if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn))
- bsi_remove (&si, true);
- else
- TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
+ arg = GIMPLE_STMT_OPERAND (stmt, 1);
+ STRIP_NOPS (arg);
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_OPERAND (arg, 0)
+ == OMP_PARALLEL_DATA_ARG (entry_stmt))
+ {
+ if (GIMPLE_STMT_OPERAND (stmt, 0)
+ == DECL_ARGUMENTS (child_fn))
+ bsi_remove (&si, true);
+ else
+ GIMPLE_STMT_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
+ break;
+ }
+ }
}
/* Declare local variables needed in CHILD_CFUN. */
new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
if (exit_bb)
single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
- cgraph_add_new_function (child_fn);
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties
+ = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
/* Convert OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
gcc_assert (!bsi_end_p (si)
&& TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
t = build1 (RETURN_EXPR, void_type_node, NULL);
- bsi_insert_after (&si, t, TSI_SAME_STMT);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
}
}
enum built_in_function start_fn,
enum built_in_function next_fn)
{
- tree l0, l1, l2, l3;
+ tree l0, l1, l2 = NULL, l3 = NULL;
tree type, istart0, iend0, iend;
- tree t, args, list;
- basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l3_bb;
+ tree t, list;
+ basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
+ basic_block l2_bb = NULL, l3_bb = NULL;
block_stmt_iterator si;
bool in_combined_parallel = is_combined_parallel (region);
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
+ gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
+
entry_bb = region->entry;
l0_bb = create_empty_bb (entry_bb);
l1_bb = single_succ (entry_bb);
- cont_bb = region->cont;
- l2_bb = create_empty_bb (cont_bb);
- l3_bb = single_succ (cont_bb);
- exit_bb = region->exit;
l0 = tree_block_label (l0_bb);
l1 = tree_block_label (l1_bb);
- l2 = tree_block_label (l2_bb);
- l3 = tree_block_label (l3_bb);
+
+ cont_bb = region->cont;
+ exit_bb = region->exit;
+ if (cont_bb)
+ {
+ l2_bb = create_empty_bb (cont_bb);
+ l3_bb = single_succ (cont_bb);
+
+ l2 = tree_block_label (l2_bb);
+ l3 = tree_block_label (l3_bb);
+ }
si = bsi_last (entry_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
if (!in_combined_parallel)
{
+ tree t0, t1, t2, t3, t4;
/* If this is not a combined parallel loop, emit a call to
GOMP_loop_foo_start in ENTRY_BB. */
list = alloc_stmt_list ();
- t = build_fold_addr_expr (iend0);
- args = tree_cons (NULL, t, NULL);
- t = build_fold_addr_expr (istart0);
- args = tree_cons (NULL, t, args);
+ t4 = build_fold_addr_expr (iend0);
+ t3 = build_fold_addr_expr (istart0);
+ t2 = fold_convert (long_integer_type_node, fd->step);
+ t1 = fold_convert (long_integer_type_node, fd->n2);
+ t0 = fold_convert (long_integer_type_node, fd->n1);
if (fd->chunk_size)
{
t = fold_convert (long_integer_type_node, fd->chunk_size);
- args = tree_cons (NULL, t, args);
+ t = build_call_expr (built_in_decls[start_fn], 6,
+ t0, t1, t2, t, t3, t4);
}
- t = fold_convert (long_integer_type_node, fd->step);
- args = tree_cons (NULL, t, args);
- t = fold_convert (long_integer_type_node, fd->n2);
- args = tree_cons (NULL, t, args);
- t = fold_convert (long_integer_type_node, fd->n1);
- args = tree_cons (NULL, t, args);
- t = build_function_call_expr (built_in_decls[start_fn], args);
+ else
+ t = build_call_expr (built_in_decls[start_fn], 5,
+ t0, t1, t2, t3, t4);
t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
- build_and_jump (&l3));
- append_to_statement_list (t, &list);
+ if (cont_bb)
+ {
+ t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
+ build_and_jump (&l3));
+ append_to_statement_list (t, &list);
+ }
bsi_insert_after (&si, list, BSI_SAME_STMT);
}
bsi_remove (&si, true);
/* Iteration setup for sequential loop goes in L0_BB. */
list = alloc_stmt_list ();
t = fold_convert (type, istart0);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = fold_convert (type, iend0);
- t = build2 (MODIFY_EXPR, void_type_node, iend, t);
+ t = build_gimple_modify_stmt (iend, t);
gimplify_and_add (t, &list);
si = bsi_start (l0_bb);
bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ /* Handle the rare case where BODY doesn't ever return. */
+ if (cont_bb == NULL)
+ {
+ remove_edge (single_succ_edge (entry_bb));
+ make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
+ make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
+ return;
+ }
+
/* Code to control the increment and predicate for the sequential
loop goes in the first half of EXIT_BB (we split EXIT_BB so
that we can inherit all the edges going out of the loop
list = alloc_stmt_list ();
t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
/* Emit code to get the next parallel iteration in L2_BB. */
list = alloc_stmt_list ();
- t = build_fold_addr_expr (iend0);
- args = tree_cons (NULL, t, NULL);
- t = build_fold_addr_expr (istart0);
- args = tree_cons (NULL, t, args);
- t = build_function_call_expr (built_in_decls[next_fn], args);
+ t = build_call_expr (built_in_decls[next_fn], 2,
+ build_fold_addr_expr (istart0),
+ build_fold_addr_expr (iend0));
t = get_formal_tmp_var (t, &list);
t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
build_and_jump (&l3));
t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
else
t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
- t = build_function_call_expr (t, NULL);
+ t = build_call_expr (t, 0);
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
struct omp_for_data *fd)
{
tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
- tree type, utype, list;
+ tree type, list;
basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
basic_block fin_bb;
block_stmt_iterator si;
type = TREE_TYPE (fd->v);
- utype = lang_hooks.types.unsigned_type (type);
entry_bb = region->entry;
seq_start_bb = create_empty_bb (entry_bb);
/* Iteration space partitioning goes in ENTRY_BB. */
list = alloc_stmt_list ();
- t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
- t = build_function_call_expr (t, NULL);
- t = fold_convert (utype, t);
+ t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
+ t = fold_convert (type, t);
nthreads = get_formal_tmp_var (t, &list);
- t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
- t = build_function_call_expr (t, NULL);
- t = fold_convert (utype, t);
+ t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ t = fold_convert (type, t);
threadid = get_formal_tmp_var (t, &list);
fd->n1 = fold_convert (type, fd->n1);
t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
- t = fold_convert (utype, t);
+ t = fold_convert (type, t);
if (is_gimple_val (t))
n = t;
else
n = get_formal_tmp_var (t, &list);
- t = build2 (TRUNC_DIV_EXPR, utype, n, nthreads);
+ t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
q = get_formal_tmp_var (t, &list);
- t = build2 (MULT_EXPR, utype, q, nthreads);
- t = build2 (NE_EXPR, utype, t, n);
- t = build2 (PLUS_EXPR, utype, q, t);
+ t = build2 (MULT_EXPR, type, q, nthreads);
+ t = build2 (NE_EXPR, type, t, n);
+ t = build2 (PLUS_EXPR, type, q, t);
q = get_formal_tmp_var (t, &list);
- t = build2 (MULT_EXPR, utype, q, threadid);
+ t = build2 (MULT_EXPR, type, q, threadid);
s0 = get_formal_tmp_var (t, &list);
- t = build2 (PLUS_EXPR, utype, s0, q);
- t = build2 (MIN_EXPR, utype, t, n);
+ t = build2 (PLUS_EXPR, type, s0, q);
+ t = build2 (MIN_EXPR, type, t, n);
e0 = get_formal_tmp_var (t, &list);
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
t = fold_convert (type, s0);
t = build2 (MULT_EXPR, type, t, fd->step);
t = build2 (PLUS_EXPR, type, t, fd->n1);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = fold_convert (type, e0);
list = alloc_stmt_list ();
t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
{
tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
tree trip, nthreads, threadid;
- tree type, utype;
+ tree type;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb, cont_bb, fin_bb;
tree list;
block_stmt_iterator si;
type = TREE_TYPE (fd->v);
- utype = lang_hooks.types.unsigned_type (type);
entry_bb = region->entry;
iter_part_bb = create_empty_bb (entry_bb);
/* Trip and adjustment setup goes in ENTRY_BB. */
list = alloc_stmt_list ();
- t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
- t = build_function_call_expr (t, NULL);
- t = fold_convert (utype, t);
+ t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
+ t = fold_convert (type, t);
nthreads = get_formal_tmp_var (t, &list);
- t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
- t = build_function_call_expr (t, NULL);
- t = fold_convert (utype, t);
+ t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ t = fold_convert (type, t);
threadid = get_formal_tmp_var (t, &list);
fd->n1 = fold_convert (type, fd->n1);
if (!is_gimple_val (fd->step))
fd->step = get_formal_tmp_var (fd->step, &list);
- fd->chunk_size = fold_convert (utype, fd->chunk_size);
+ fd->chunk_size = fold_convert (type, fd->chunk_size);
if (!is_gimple_val (fd->chunk_size))
fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
- t = fold_convert (utype, t);
+ t = fold_convert (type, t);
if (is_gimple_val (t))
n = t;
else
n = get_formal_tmp_var (t, &list);
- t = build_int_cst (utype, 0);
+ t = build_int_cst (type, 0);
trip = get_initialized_tmp_var (t, &list, NULL);
si = bsi_last (entry_bb);
/* Iteration space partitioning goes in ITER_PART_BB. */
list = alloc_stmt_list ();
- t = build2 (MULT_EXPR, utype, trip, nthreads);
- t = build2 (PLUS_EXPR, utype, t, threadid);
- t = build2 (MULT_EXPR, utype, t, fd->chunk_size);
+ t = build2 (MULT_EXPR, type, trip, nthreads);
+ t = build2 (PLUS_EXPR, type, t, threadid);
+ t = build2 (MULT_EXPR, type, t, fd->chunk_size);
s0 = get_formal_tmp_var (t, &list);
- t = build2 (PLUS_EXPR, utype, s0, fd->chunk_size);
- t = build2 (MIN_EXPR, utype, t, n);
+ t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+ t = build2 (MIN_EXPR, type, t, n);
e0 = get_formal_tmp_var (t, &list);
t = build2 (LT_EXPR, boolean_type_node, s0, n);
t = fold_convert (type, s0);
t = build2 (MULT_EXPR, type, t, fd->step);
t = build2 (PLUS_EXPR, type, t, fd->n1);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = fold_convert (type, e0);
list = alloc_stmt_list ();
t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
+ t = build_gimple_modify_stmt (fd->v, t);
gimplify_and_add (t, &list);
t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
/* Trip update code goes into TRIP_UPDATE_BB. */
list = alloc_stmt_list ();
- t = build_int_cst (utype, 1);
- t = build2 (PLUS_EXPR, utype, trip, t);
- t = build2 (MODIFY_EXPR, void_type_node, trip, t);
+ t = build_int_cst (type, 1);
+ t = build2 (PLUS_EXPR, type, trip, t);
+ t = build_gimple_modify_stmt (trip, t);
gimplify_and_add (t, &list);
si = bsi_start (trip_update_bb);
extract_omp_for_data (last_stmt (region->entry), &fd);
region->sched_kind = fd.sched_kind;
- if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered)
+ if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
+ && !fd.have_ordered
+ && region->cont
+ && region->exit)
{
if (fd.chunk_size == NULL)
expand_omp_for_static_nochunk (region, &fd);
entry_bb = region->entry;
l0_bb = create_empty_bb (entry_bb);
+ l0 = tree_block_label (l0_bb);
+
+ gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
l1_bb = region->cont;
- l2_bb = single_succ (l1_bb);
- default_bb = create_empty_bb (l1_bb->prev_bb);
- exit_bb = region->exit;
+ if (l1_bb)
+ {
+ l2_bb = single_succ (l1_bb);
+ default_bb = create_empty_bb (l1_bb->prev_bb);
- l0 = tree_block_label (l0_bb);
- l1 = tree_block_label (l1_bb);
+ l1 = tree_block_label (l1_bb);
+ }
+ else
+ {
+ l2_bb = create_empty_bb (l0_bb);
+ default_bb = l2_bb;
+
+ l1 = NULL;
+ }
l2 = tree_block_label (l2_bb);
+ exit_bb = region->exit;
+
v = create_tmp_var (unsigned_type_node, ".section");
/* We will build a switch() with enough cases for all the
/* If we are not inside a combined parallel+sections region,
call GOMP_sections_start. */
t = build_int_cst (unsigned_type_node, len);
- t = tree_cons (NULL, t, NULL);
u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
- t = build_function_call_expr (u, t);
- t = build2 (MODIFY_EXPR, void_type_node, v, t);
+ t = build_call_expr (u, 1, t);
+ t = build_gimple_modify_stmt (v, t);
bsi_insert_after (&si, t, BSI_SAME_STMT);
}
bsi_remove (&si, true);
build_int_cst (unsigned_type_node, 0), NULL, l2);
TREE_VEC_ELT (label_vec, 0) = t;
make_edge (l0_bb, l2_bb, 0);
-
+
/* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
for (inner = region->inner, i = 1; inner; inner = inner->next, ++i)
{
gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
bsi_remove (&si, true);
- si = bsi_last (s_exit_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
- bsi_remove (&si, true);
-
e = single_pred_edge (s_entry_bb);
e->flags = 0;
redirect_edge_pred (e, l0_bb);
single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
+
+ if (s_exit_bb == NULL)
+ continue;
+
+ si = bsi_last (s_exit_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
+ bsi_remove (&si, true);
+
single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
}
make_edge (l0_bb, default_bb, 0);
si = bsi_start (default_bb);
- t = built_in_decls[BUILT_IN_TRAP];
- t = build_function_call_expr (t, NULL);
+ t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
/* Code to get the next section goes in L1_BB. */
- si = bsi_last (l1_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
+ if (l1_bb)
+ {
+ si = bsi_last (l1_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
- t = build_function_call_expr (t, NULL);
- t = build2 (MODIFY_EXPR, void_type_node, v, t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
- bsi_remove (&si, true);
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
+ t = build_gimple_modify_stmt (v, t);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ bsi_remove (&si, true);
+ }
/* Cleanup function replaces OMP_RETURN in EXIT_BB. */
- si = bsi_last (exit_bb);
- if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
- t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
- else
- t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
- t = build_function_call_expr (t, NULL);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
- bsi_remove (&si, true);
+ if (exit_bb)
+ {
+ si = bsi_last (exit_bb);
+ if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
+ t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
+ else
+ t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
+ t = build_call_expr (t, 0);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ bsi_remove (&si, true);
+ }
/* Connect the new blocks. */
if (is_combined_parallel (region))
else
make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
- e = single_succ_edge (l1_bb);
- redirect_edge_succ (e, l0_bb);
- e->flags = EDGE_FALLTHRU;
+ if (l1_bb)
+ {
+ e = single_succ_edge (l1_bb);
+ redirect_edge_succ (e, l0_bb);
+ e->flags = EDGE_FALLTHRU;
+ }
}
bsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
- si = bsi_last (exit_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
- bsi_remove (&si, true);
- single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
+ if (exit_bb)
+ {
+ si = bsi_last (exit_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
+ bsi_remove (&si, true);
+ single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
+ }
}
{
tree t;
- t = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
- t = build_function_call_expr (t, NULL);
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
t = build3 (COND_EXPR, void_type_node, t,
OMP_SINGLE_BODY (single_stmt), NULL);
gimplify_and_add (t, pre_p);
static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
- tree ptr_type, t, args, l0, l1, l2, copyin_seq;
+ tree ptr_type, t, l0, l1, l2, copyin_seq;
ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
l1 = create_artificial_label ();
l2 = create_artificial_label ();
- t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START];
- t = build_function_call_expr (t, NULL);
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
t = fold_convert (ptr_type, t);
- t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
+ t = build_gimple_modify_stmt (ctx->receiver_decl, t);
gimplify_and_add (t, pre_p);
t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
&copyin_seq, ctx);
t = build_fold_addr_expr (ctx->sender_decl);
- args = tree_cons (NULL, t, NULL);
- t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END];
- t = build_function_call_expr (t, args);
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
gimplify_and_add (t, pre_p);
t = build_and_jump (&l2);
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
- x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
gimplify_and_add (x, &BIND_EXPR_BODY (bind));
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
- x = built_in_decls[BUILT_IN_GOMP_ORDERED_START];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
gimplify_and_add (x, &BIND_EXPR_BODY (bind));
lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
OMP_ORDERED_BODY (stmt) = NULL;
- x = built_in_decls[BUILT_IN_GOMP_ORDERED_END];
- x = build_function_call_expr (x, NULL);
+ x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
gimplify_and_add (x, &BIND_EXPR_BODY (bind));
x = make_node (OMP_RETURN);
name = OMP_CRITICAL_NAME (stmt);
if (name)
{
- tree decl, args;
+ tree decl;
splay_tree_node n;
if (!critical_name_mutexes)
DECL_COMMON (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
- cgraph_varpool_finalize_decl (decl);
+ varpool_finalize_decl (decl);
splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
(splay_tree_value) decl);
else
decl = (tree) n->value;
- args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
- lock = build_function_call_expr (lock, args);
+ lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
- args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
- unlock = build_function_call_expr (unlock, args);
+ unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
}
else
{
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
- lock = build_function_call_expr (lock, NULL);
+ lock = build_call_expr (lock, 0);
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
- unlock = build_function_call_expr (unlock, NULL);
+ unlock = build_call_expr (unlock, 0);
}
push_gimplify_context ();
/* A subroutine of lower_omp_for. Generate code to emit the predicate
for a lastprivate clause. Given a loop control predicate of (V
cond N2), we gate the clause on (!(V cond N2)). The lowered form
- is appended to *BODY_P. */
+ is appended to *DLIST, iterator initialization is appended to
+ *BODY_P. */
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
- struct omp_context *ctx)
+ tree *dlist, struct omp_context *ctx)
{
- tree clauses, cond;
+ tree clauses, cond, stmts, vinit, t;
enum tree_code cond_code;
cond_code = fd->cond_code;
cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
clauses = OMP_FOR_CLAUSES (fd->for_stmt);
- lower_lastprivate_clauses (clauses, cond, body_p, ctx);
+ stmts = NULL;
+ lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
+ if (stmts != NULL)
+ {
+ append_to_statement_list (stmts, dlist);
+
+ /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
+ vinit = fd->n1;
+ if (cond_code == EQ_EXPR
+ && host_integerp (fd->n2, 0)
+ && ! integer_zerop (fd->n2))
+ vinit = build_int_cst (TREE_TYPE (fd->v), 0);
+
+ /* Initialize the iterator variable, so that threads that don't execute
+ any iterations don't execute the lastprivate clauses by accident. */
+ t = build_gimple_modify_stmt (fd->v, vinit);
+ gimplify_and_add (t, body_p);
+ }
}
We just need to make sure that VAL1, VAL2 and VAL3 are lowered
using the .omp_data_s mapping, if needed. */
- rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1);
+ rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
- rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
+ rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, body_p);
/* Once lowered, extract the bounds and clauses. */
extract_omp_for_data (stmt, &fd);
+ lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
+
append_to_statement_list (stmt, body_p);
append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
append_to_statement_list (t, body_p);
/* After the loop, add exit clauses. */
- lower_omp_for_lastprivate (&fd, &dlist, ctx);
lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
append_to_statement_list (dlist, body_p);
if (ctx->record_type)
{
t = build_fold_addr_expr (ctx->sender_decl);
- t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
+ /* fixup_child_record_type might have changed receiver_decl's type. */
+ t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
+ t = build_gimple_modify_stmt (ctx->receiver_decl, t);
append_to_statement_list (t, &new_body);
}
tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}
+/* Copy EXP into a temporary. Insert the initialization statement before TSI. */
+
+static tree
+init_tmp_var (tree exp, tree_stmt_iterator *tsi)
+{
+ tree t, stmt;
+
+ t = create_tmp_var (TREE_TYPE (exp), NULL);
+ DECL_GIMPLE_REG_P (t) = 1;
+ stmt = build_gimple_modify_stmt (t, exp);
+ SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
+ tsi_link_before (tsi, stmt, TSI_SAME_STMT);
+
+ return t;
+}
+
+/* Similarly, but copy from the temporary and insert the statement
+ after the iterator. */
+
+static tree
+save_tmp_var (tree exp, tree_stmt_iterator *tsi)
+{
+ tree t, stmt;
+
+ t = create_tmp_var (TREE_TYPE (exp), NULL);
+ DECL_GIMPLE_REG_P (t) = 1;
+ stmt = build_gimple_modify_stmt (exp, t);
+ SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
+ tsi_link_after (tsi, stmt, TSI_SAME_STMT);
+
+ return t;
+}
/* Callback for walk_stmts. Lower the OpenMP directive pointed by TP. */
case VAR_DECL:
if (ctx && DECL_HAS_VALUE_EXPR_P (t))
- lower_regimplify (tp, wi);
+ {
+ lower_regimplify (&t, wi);
+ if (wi->val_only)
+ {
+ if (wi->is_lhs)
+ t = save_tmp_var (t, &wi->tsi);
+ else
+ t = init_tmp_var (t, &wi->tsi);
+ }
+ *tp = t;
+ }
break;
case ADDR_EXPR: