marshalling to implement data sharing and copying clauses.
Contributed by Diego Novillo <dnovillo@redhat.com>
- Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
+Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
-
+#include "optabs.h"
+#include "cfgloop.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
/* Find an OpenMP clause of type KIND within CLAUSES. */
-static tree
+tree
find_omp_clause (tree clauses, enum tree_code kind)
{
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
- tree t;
+ tree t, var;
fd->for_stmt = for_stmt;
fd->pre = NULL;
t = OMP_FOR_INIT (for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
fd->v = GIMPLE_STMT_OPERAND (t, 0);
- gcc_assert (DECL_P (fd->v));
+ gcc_assert (SSA_VAR_P (fd->v));
gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
+ var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
t = OMP_FOR_COND (for_stmt);
fd->cond_code = TREE_CODE (t);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
fd->n2 = TREE_OPERAND (t, 1);
switch (fd->cond_code)
{
t = OMP_FOR_INCR (fd->for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == fd->v);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
t = GIMPLE_STMT_OPERAND (t, 1);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
}
else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
{
- basic_block bb = bb_for_stmt (ws_stmt);
- t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs));
+ /* Number of sections is equal to the number of edges from the
+ OMP_SECTIONS_SWITCH statement, except for the one to the exit
+ of the sections region. */
+ basic_block bb = single_succ (bb_for_stmt (ws_stmt));
+ t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
t = tree_cons (NULL, t, NULL);
return t;
}
basic_block ws_entry_bb, ws_exit_bb;
if (region == NULL || region->inner == NULL
- || region->exit == NULL || region->inner->exit == NULL)
+ || region->exit == NULL || region->inner->exit == NULL
+ || region->inner->cont == NULL)
return;
/* We only support parallel+for and parallel+sections. */
if (single_succ (par_entry_bb) == ws_entry_bb
&& single_succ (ws_exit_bb) == par_exit_bb
- && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb))
+ && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
+ && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
+ || (last_and_only_stmt (ws_entry_bb)
+ && last_and_only_stmt (par_exit_bb))))
{
- tree ws_stmt = last_stmt (region->inner->entry);
+ tree ws_stmt = last_stmt (ws_entry_bb);
if (region->inner->type == OMP_FOR)
{
/* Return true if EXPR is variable sized. */
static inline bool
-is_variable_sized (tree expr)
+is_variable_sized (const_tree expr)
{
return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}
}
static inline tree
-maybe_lookup_decl (tree var, omp_context *ctx)
+maybe_lookup_decl (const_tree var, omp_context *ctx)
{
tree *n;
n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
return n ? (tree) n->value : NULL_TREE;
}
-/* Return true if DECL should be copied by pointer. SHARED_P is true
- if DECL is to be shared. */
+/* Return true if DECL should be copied by pointer. SHARED_CTX is
+ the parallel context if DECL is to be shared. */
static bool
-use_pointer_for_field (tree decl, bool shared_p)
+use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
/* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
- if (shared_p)
+ if (shared_ctx)
{
/* ??? Trivially accessible from anywhere. But why would we even
be passing an address in this case? Should we simply assert
address taken. */
if (TREE_ADDRESSABLE (decl))
return true;
+
+ /* Disallow copy-in/out in nested parallel if
+ decl is shared in outer parallel, otherwise
+ each thread could store the shared variable
+ in its own copy-in location, making the
+ variable no longer really shared. */
+ if (!TREE_READONLY (decl) && shared_ctx->is_nested)
+ {
+ omp_context *up;
+
+ for (up = shared_ctx->outer; up; up = up->outer)
+ if (maybe_lookup_decl (decl, up))
+ break;
+
+ if (up && is_parallel_ctx (up))
+ {
+ tree c;
+
+ for (c = OMP_PARALLEL_CLAUSES (up->stmt);
+ c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ && OMP_CLAUSE_DECL (c) == decl)
+ break;
+
+ if (c)
+ return true;
+ }
+ }
}
return false;
}
-/* Construct a new automatic decl similar to VAR. */
+/* Create a new VAR_DECL and copy information from VAR to it. */
-static tree
-omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+tree
+copy_var_decl (tree var, tree name, tree type)
{
tree copy = build_decl (VAR_DECL, name, type);
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
+ TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
+ DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
+ DECL_CONTEXT (copy) = DECL_CONTEXT (var);
+ DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
TREE_USED (copy) = 1;
- DECL_CONTEXT (copy) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
+ return copy;
+}
+
+/* Construct a new automatic decl similar to VAR. */
+
+static tree
+omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+{
+ tree copy = copy_var_decl (var, name, type);
+
+ DECL_CONTEXT (copy) = current_function_decl;
TREE_CHAIN (copy) = ctx->block_vars;
ctx->block_vars = copy;
}
else if (is_parallel_ctx (ctx))
{
- bool by_ref = use_pointer_for_field (var, false);
+ bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
}
else if (ctx->outer)
case OMP_CLAUSE_SHARED:
gcc_assert (is_parallel_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
- gcc_assert (!is_variable_sized (decl));
- by_ref = use_pointer_for_field (decl, true);
+ gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
+ || !is_variable_sized (decl));
+ by_ref = use_pointer_for_field (decl, ctx);
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
&& ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx)))
{
- by_ref = use_pointer_for_field (decl, false);
+ by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, ctx);
}
install_var_local (decl, ctx);
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
- by_ref = use_pointer_for_field (decl, false);
+ by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, ctx);
break;
/* Allocate memory for the function structure. The call to
- allocate_struct_function clobbers CFUN, so we need to restore
+ push_struct_function clobbers CFUN, so we need to restore
it afterward. */
- allocate_struct_function (decl);
+ push_struct_function (decl);
DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
- cfun = ctx->cb.src_cfun;
+ pop_cfun ();
}
/* Build a call to GOMP_barrier. */
-static void
-build_omp_barrier (tree *stmt_list)
+static tree
+build_omp_barrier (void)
{
- tree t = build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
- gimplify_and_add (t, stmt_list);
+ return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it. */
tree t;
omp_context *up;
- gcc_assert (ctx->is_nested);
-
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
- gcc_assert (t);
+ gcc_assert (!ctx->is_nested || t || is_global_var (decl));
- return t;
+ return t ? t : decl;
}
tree t = NULL;
omp_context *up;
- if (ctx->is_nested)
- for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
- t = maybe_lookup_decl (decl, up);
+ for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
+ t = maybe_lookup_decl (decl, up);
return t ? t : decl;
}
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
- by_ref = use_pointer_for_field (var, true);
+ by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
break;
case OMP_CLAUSE_COPYIN:
- by_ref = use_pointer_for_field (var, false);
+ by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. */
if (copyin_by_ref || lastprivate_firstprivate)
- build_omp_barrier (ilist);
+ gimplify_and_add (build_omp_barrier (), ilist);
}
continue;
var = OMP_CLAUSE_DECL (c);
- by_ref = use_pointer_for_field (var, false);
+ by_ref = use_pointer_for_field (var, NULL);
ref = build_sender_ref (var, ctx);
- x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
+ x = lookup_decl_in_outer_ctx (var, ctx);
x = by_ref ? build_fold_addr_expr (x) : x;
x = build_gimple_modify_stmt (ref, x);
gimplify_and_add (x, slist);
continue;
}
- var = val = OMP_CLAUSE_DECL (c);
- if (ctx->is_nested)
- var = lookup_decl_in_outer_ctx (val, ctx);
+ val = OMP_CLAUSE_DECL (c);
+ var = lookup_decl_in_outer_ctx (val, ctx);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
&& is_global_var (var))
continue;
if (is_variable_sized (val))
continue;
- by_ref = use_pointer_for_field (val, false);
+ by_ref = use_pointer_for_field (val, NULL);
switch (OMP_CLAUSE_CODE (c))
{
if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
continue;
- var = ovar;
-
/* If CTX is a nested parallel directive, find the immediately
enclosing parallel or workshare construct that contains a
mapping for OVAR. */
- if (ctx->is_nested)
- var = lookup_decl_in_outer_ctx (ovar, ctx);
+ var = lookup_decl_in_outer_ctx (ovar, ctx);
- if (use_pointer_for_field (ovar, true))
+ if (use_pointer_for_field (ovar, ctx))
{
x = build_sender_ref (ovar, ctx);
var = build_fold_addr_expr (var);
expand_parallel_call (struct omp_region *region, basic_block bb,
tree entry_stmt, tree ws_args)
{
- tree t, t1, t2, val, cond, c, list, clauses;
+ tree t, t1, t2, val, cond, c, clauses;
block_stmt_iterator si;
int start_ix;
clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
- push_gimplify_context ();
/* Determine what flavor of GOMP_parallel_start we will be
emitting. */
cond = gimple_boolify (cond);
if (integer_zerop (val))
- val = build2 (EQ_EXPR, unsigned_type_node, cond,
- build_int_cst (TREE_TYPE (cond), 0));
+ val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
+ build_int_cst (TREE_TYPE (cond), 0));
else
{
basic_block cond_bb, then_bb, else_bb;
- edge e;
- tree t, then_lab, else_lab, tmp;
+ edge e, e_then, e_else;
+ tree t, tmp_then, tmp_else, tmp_join, tmp_var;
+
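+ /* VAL is computed on one of two paths. In SSA form each path needs
+ its own SSA name for the temporary, merged by a PHI node at the
+ join point below; otherwise a single variable is reused on both
+ paths. */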
+ tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
+ if (gimple_in_ssa_p (cfun))
+ {
+ tmp_then = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_else = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_join = make_ssa_name (tmp_var, NULL_TREE);
+ }
+ else
+ {
+ tmp_then = tmp_var;
+ tmp_else = tmp_var;
+ tmp_join = tmp_var;
+ }
- tmp = create_tmp_var (TREE_TYPE (val), NULL);
e = split_block (bb, NULL);
cond_bb = e->src;
bb = e->dest;
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
- then_lab = create_artificial_label ();
- else_lab = create_artificial_label ();
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+ set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
t = build3 (COND_EXPR, void_type_node,
- cond,
- build_and_jump (&then_lab),
- build_and_jump (&else_lab));
+ cond, NULL_TREE, NULL_TREE);
si = bsi_start (cond_bb);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (then_bb);
- t = build1 (LABEL_EXPR, void_type_node, then_lab);
- bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
- t = build_gimple_modify_stmt (tmp, val);
+ t = build_gimple_modify_stmt (tmp_then, val);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_then) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (else_bb);
- t = build1 (LABEL_EXPR, void_type_node, else_lab);
- bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
- t = build_gimple_modify_stmt (tmp,
+ t = build_gimple_modify_stmt (tmp_else,
build_int_cst (unsigned_type_node, 1));
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_else) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
- make_edge (then_bb, bb, EDGE_FALLTHRU);
- make_edge (else_bb, bb, EDGE_FALLTHRU);
+ e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
+ e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ tree phi = create_phi_node (tmp_join, bb);
+ SSA_NAME_DEF_STMT (tmp_join) = phi;
+ add_phi_arg (phi, tmp_then, e_then);
+ add_phi_arg (phi, tmp_else, e_else);
+ }
- val = tmp;
+ val = tmp_join;
}
- list = NULL_TREE;
- val = get_formal_tmp_var (val, &list);
si = bsi_start (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
- list = NULL_TREE;
+ si = bsi_last (bb);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
else
t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
else
t = build_fold_addr_expr (t);
t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
- gimplify_and_add (t, &list);
-
- si = bsi_last (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
-
- pop_gimplify_context (NULL_TREE);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
}
}
+/* Optimize omp_get_thread_num () and omp_get_num_threads ()
+ calls. These can't be declared as const functions, but
+ within one parallel body they are constant, so they can be
+ transformed there into __builtin_omp_get_{thread_num,num_threads} ()
+ which are declared const. */
+
+static void
+optimize_omp_library_calls (void)
+{
+ basic_block bb;
+ block_stmt_iterator bsi;
+ tree thr_num_id
+ = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
+ tree num_thr_id
+ = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
+
+ FOR_EACH_BB (bb)
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ {
+ tree stmt = bsi_stmt (bsi);
+ tree call = get_call_expr_in (stmt);
+ tree decl;
+
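+ /* Only consider external, public declarations without a body --
+ calls that really bind to the OpenMP runtime library rather
+ than to some user-provided definition. */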
+ if (call
+ && (decl = get_callee_fndecl (call))
+ && DECL_EXTERNAL (decl)
+ && TREE_PUBLIC (decl)
+ && DECL_INITIAL (decl) == NULL)
+ {
+ tree built_in;
+
+ if (DECL_NAME (decl) == thr_num_id)
+ built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
+ else if (DECL_NAME (decl) == num_thr_id)
+ built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
+ else
+ continue;
+
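+ /* Also require matching assembler names (the user might have
+ renamed the symbol with an asm specification) and a call with
+ no arguments, matching the builtin's prototype. */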
+ if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
+ || call_expr_nargs (call) != 0)
+ continue;
+
+ if (flag_exceptions && !TREE_NOTHROW (decl))
+ continue;
+
+ if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
+ || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
+ continue;
+
+ CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
+ }
+ }
+}
+
/* Expand the OpenMP parallel directive starting at REGION. */
static void
expand_omp_parallel (struct omp_region *region)
{
basic_block entry_bb, exit_bb, new_bb;
- struct function *child_cfun, *saved_cfun;
+ struct function *child_cfun;
tree child_fn, block, t, ws_args;
block_stmt_iterator si;
tree entry_stmt;
edge e;
- bool do_cleanup_cfg = false;
entry_stmt = last_stmt (region->entry);
child_fn = OMP_PARALLEL_FN (entry_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
- saved_cfun = cfun;
+ /* If this function has already been instrumented, make sure
+ the child function isn't instrumented again. */
+ child_cfun->after_tree_profile = cfun->after_tree_profile;
entry_bb = region->entry;
exit_bb = region->exit;
bsi_remove (&si, true);
new_bb = entry_bb;
- remove_edge (entry_succ_e);
if (exit_bb)
{
exit_succ_e = single_succ_edge (exit_bb);
make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
}
- do_cleanup_cfg = true;
+ remove_edge_and_dominated_blocks (entry_succ_e);
}
else
{
{
basic_block entry_succ_bb = single_succ (entry_bb);
block_stmt_iterator si;
+ tree parcopy_stmt = NULL_TREE, arg, narg;
for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
{
&& TREE_OPERAND (arg, 0)
== OMP_PARALLEL_DATA_ARG (entry_stmt))
{
- if (GIMPLE_STMT_OPERAND (stmt, 0)
- == DECL_ARGUMENTS (child_fn))
- bsi_remove (&si, true);
- else
- GIMPLE_STMT_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
+ parcopy_stmt = stmt;
break;
}
}
+
+ gcc_assert (parcopy_stmt != NULL_TREE);
+ arg = DECL_ARGUMENTS (child_fn);
+
+ if (!gimple_in_ssa_p (cfun))
+ {
+ if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
+ bsi_remove (&si, true);
+ else
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
+ }
+ else
+ {
+ /* If we are in SSA form, we must load the value from the default
+ definition of the argument. That definition should not exist yet,
+ since the argument is never used uninitialized. */
+ gcc_assert (gimple_default_def (cfun, arg) == NULL);
+ narg = make_ssa_name (arg, build_empty_stmt ());
+ set_default_def (arg, narg);
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
+ update_stmt (parcopy_stmt);
+ }
}
/* Declare local variables needed in CHILD_CFUN. */
block = DECL_INITIAL (child_fn);
- BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
+ BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
- /* Reset DECL_CONTEXT on locals and function arguments. */
- for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
- DECL_CONTEXT (t) = child_fn;
-
+ /* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
entry_bb = e->dest;
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
- /* Move the parallel region into CHILD_CFUN. We need to reset
- dominance information because the expansion of the inner
- regions has invalidated it. */
- free_dominance_info (CDI_DOMINATORS);
- new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
- if (exit_bb)
- single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
- DECL_STRUCT_FUNCTION (child_fn)->curr_properties
- = cfun->curr_properties;
- cgraph_add_new_function (child_fn, true);
-
/* Convert OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
{
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
}
- }
- /* Emit a library call to launch the children threads. */
- expand_parallel_call (region, new_bb, entry_stmt, ws_args);
+ /* Move the parallel region into CHILD_CFUN. */
+
+ if (gimple_in_ssa_p (cfun))
+ {
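+ /* Initialize the SSA machinery in the child function, so that
+ the statements moved into it stay in SSA form. */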
+ push_cfun (child_cfun);
+ init_tree_ssa (child_cfun);
+ init_ssa_operands ();
+ cfun->gimple_df->in_ssa_p = true;
+ pop_cfun ();
+ }
+ new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
+ if (exit_bb)
+ single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
- if (do_cleanup_cfg)
- {
- /* Clean up the unreachable sub-graph we created above. */
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
- cleanup_tree_cfg ();
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties
+ = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
+
+ /* Fix the callgraph edges for child_cfun. Those for cfun will be
+ fixed in a following pass. */
+ push_cfun (child_cfun);
+ if (optimize)
+ optimize_omp_library_calls ();
+ rebuild_cgraph_edges ();
+
+ /* Some EH regions might become dead, see PR34608. If
+ pass_cleanup_cfg isn't the first pass to happen with the
+ new child, these dead EH edges might cause problems.
+ Clean them up now. */
+ if (flag_exceptions)
+ {
+ basic_block bb;
+ tree save_current = current_function_decl;
+ bool changed = false;
+
+ current_function_decl = child_fn;
+ FOR_EACH_BB (bb)
+ changed |= tree_purge_dead_eh_edges (bb);
+ if (changed)
+ cleanup_tree_cfg ();
+ current_function_decl = save_current;
+ }
+ pop_cfun ();
}
+
+ /* Emit a library call to launch the children threads. */
+ expand_parallel_call (region, new_bb, entry_stmt, ws_args);
+ update_ssa (TODO_update_ssa_only_virtuals);
}
L3:
If this is a combined omp parallel loop, instead of the call to
- GOMP_loop_foo_start, we emit 'goto L3'. */
+ GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
static void
expand_omp_for_generic (struct omp_region *region,
enum built_in_function start_fn,
enum built_in_function next_fn)
{
- tree l0, l1, l2 = NULL, l3 = NULL;
- tree type, istart0, iend0, iend;
- tree t, list;
+ tree type, istart0, iend0, iend, phi;
+ tree t, vmain, vback;
basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
basic_block l2_bb = NULL, l3_bb = NULL;
block_stmt_iterator si;
bool in_combined_parallel = is_combined_parallel (region);
+ bool broken_loop = region->cont == NULL;
+ edge e, ne;
+
+ gcc_assert (!broken_loop || !in_combined_parallel);
type = TREE_TYPE (fd->v);
istart0 = create_tmp_var (long_integer_type_node, ".istart0");
iend0 = create_tmp_var (long_integer_type_node, ".iend0");
- iend = create_tmp_var (type, NULL);
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
-
- gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
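+ /* ISTART0 and IEND0 are filled in through pointers by the
+ GOMP_loop_*_start and GOMP_loop_*_next calls, hence the
+ TREE_ADDRESSABLE settings above; in SSA form they must also
+ be registered as referenced variables. */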
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (istart0);
+ add_referenced_var (iend0);
+ }
entry_bb = region->entry;
- l0_bb = create_empty_bb (entry_bb);
- l1_bb = single_succ (entry_bb);
-
- l0 = tree_block_label (l0_bb);
- l1 = tree_block_label (l1_bb);
-
cont_bb = region->cont;
- exit_bb = region->exit;
- if (cont_bb)
+ gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
+ gcc_assert (broken_loop
+ || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
+ l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
+ l1_bb = single_succ (l0_bb);
+ if (!broken_loop)
{
l2_bb = create_empty_bb (cont_bb);
- l3_bb = single_succ (cont_bb);
-
- l2 = tree_block_label (l2_bb);
- l3 = tree_block_label (l3_bb);
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
}
+ else
+ l2_bb = NULL;
+ l3_bb = BRANCH_EDGE (entry_bb)->dest;
+ exit_bb = region->exit;
si = bsi_last (entry_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- if (!in_combined_parallel)
+ if (in_combined_parallel)
+ {
+ /* In a combined parallel loop, emit a call to
+ GOMP_loop_foo_next. */
+ t = build_call_expr (built_in_decls[next_fn], 2,
+ build_fold_addr_expr (istart0),
+ build_fold_addr_expr (iend0));
+ }
+ else
{
tree t0, t1, t2, t3, t4;
/* If this is not a combined parallel loop, emit a call to
GOMP_loop_foo_start in ENTRY_BB. */
- list = alloc_stmt_list ();
t4 = build_fold_addr_expr (iend0);
t3 = build_fold_addr_expr (istart0);
t2 = fold_convert (long_integer_type_node, fd->step);
else
t = build_call_expr (built_in_decls[start_fn], 5,
t0, t1, t2, t3, t4);
- t = get_formal_tmp_var (t, &list);
- if (cont_bb)
- {
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
- build_and_jump (&l3));
- append_to_statement_list (t, &list);
- }
- bsi_insert_after (&si, list, BSI_SAME_STMT);
}
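+ /* Either call returns nonzero iff this thread was assigned a chunk
+ of iterations; branch to the iteration setup in L0_BB or past the
+ loop to L3_BB accordingly (the edge flags are fixed up below). */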
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Iteration setup for sequential loop goes in L0_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (l0_bb);
t = fold_convert (type, istart0);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, iend0);
- t = build_gimple_modify_stmt (iend, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (l0_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
- /* Handle the rare case where BODY doesn't ever return. */
- if (cont_bb == NULL)
+ if (!broken_loop)
{
- remove_edge (single_succ_edge (entry_bb));
- make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
- make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
- return;
- }
-
- /* Code to control the increment and predicate for the sequential
- loop goes in the first half of EXIT_BB (we split EXIT_BB so
- that we can inherit all the edges going out of the loop
- body). */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ /* Code to control the increment and predicate for the sequential
+ loop goes in the CONT_BB. */
+ si = bsi_last (cont_bb);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
- t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
- build_and_jump (&l2));
- append_to_statement_list (t, &list);
+ t = build2 (fd->cond_code, boolean_type_node, vback, iend);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_remove (&si, true);
+ /* Remove OMP_CONTINUE. */
+ bsi_remove (&si, true);
- /* Emit code to get the next parallel iteration in L2_BB. */
- list = alloc_stmt_list ();
+ /* Emit code to get the next parallel iteration in L2_BB. */
+ si = bsi_start (l2_bb);
- t = build_call_expr (built_in_decls[next_fn], 2,
- build_fold_addr_expr (istart0),
- build_fold_addr_expr (iend0));
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
- build_and_jump (&l3));
- append_to_statement_list (t, &list);
-
- si = bsi_start (l2_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = build_call_expr (built_in_decls[next_fn], 2,
+ build_fold_addr_expr (istart0),
+ build_fold_addr_expr (iend0));
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ }
/* Add the loop cleanup function. */
si = bsi_last (exit_bb);
bsi_remove (&si, true);
/* Connect the new blocks. */
- remove_edge (single_succ_edge (entry_bb));
- if (in_combined_parallel)
- make_edge (entry_bb, l2_bb, EDGE_FALLTHRU);
- else
+ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
+ find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
+
+ if (!broken_loop)
{
- make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE);
- make_edge (entry_bb, l3_bb, EDGE_FALSE_VALUE);
+ e = find_edge (cont_bb, l3_bb);
+ ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+
+ for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
+ PHI_ARG_DEF_FROM_EDGE (phi, e));
+ remove_edge (e);
+
+ find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
+ make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
+ make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
+
+ set_immediate_dominator (CDI_DOMINATORS, l2_bb,
+ recompute_dominator (CDI_DOMINATORS, l2_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l3_bb,
+ recompute_dominator (CDI_DOMINATORS, l3_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l0_bb,
+ recompute_dominator (CDI_DOMINATORS, l0_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l1_bb,
+ recompute_dominator (CDI_DOMINATORS, l1_bb));
}
-
- make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
-
- remove_edge (single_succ_edge (cont_bb));
- make_edge (cont_bb, l1_bb, EDGE_TRUE_VALUE);
- make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
-
- make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
- make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
}
q += (q * nthreads != n);
s0 = q * threadid;
e0 = min(s0 + q, n);
+ V = s0 * STEP + N1;
if (s0 >= e0) goto L2; else goto L0;
L0:
- V = s0 * STEP + N1;
e = e0 * STEP + N1;
L1:
BODY;
expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd)
{
- tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
- tree type, list;
+ tree n, q, s0, e0, e, t, nthreads, threadid;
+ tree type, vmain, vback;
basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
basic_block fin_bb;
block_stmt_iterator si;
type = TREE_TYPE (fd->v);
entry_bb = region->entry;
- seq_start_bb = create_empty_bb (entry_bb);
- body_bb = single_succ (entry_bb);
cont_bb = region->cont;
- fin_bb = single_succ (cont_bb);
+ gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
+ gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
+ seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
+ body_bb = single_succ (seq_start_bb);
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
exit_bb = region->exit;
- l0 = tree_block_label (seq_start_bb);
- l1 = tree_block_label (body_bb);
- l2 = tree_block_label (fin_bb);
-
/* Iteration space partitioning goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
+ fd->n1 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
+ fd->n2 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
+ fd->step = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
- else
- n = get_formal_tmp_var (t, &list);
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (MULT_EXPR, type, q, nthreads);
- t = build2 (NE_EXPR, type, t, n);
- t = build2 (PLUS_EXPR, type, q, t);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, q, nthreads);
+ t = fold_build2 (NE_EXPR, type, t, n);
+ t = fold_build2 (PLUS_EXPR, type, q, t);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
t = build2 (MULT_EXPR, type, q, threadid);
- s0 = get_formal_tmp_var (t, &list);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (PLUS_EXPR, type, s0, q);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (PLUS_EXPR, type, s0, q);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2),
- build_and_jump (&l0));
- append_to_statement_list (t, &list);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop replaces the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
- build_and_jump (&l2));
- append_to_statement_list (t, &list);
-
si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, vback, e);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+
+ /* Remove the OMP_CONTINUE statement. */
bsi_remove (&si, true);
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect all the blocks. */
- make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);
+ find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
+ find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
- remove_edge (single_succ_edge (entry_bb));
- make_edge (entry_bb, fin_bb, EDGE_TRUE_VALUE);
- make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE);
-
- make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
+ find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
+
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
}
adj = STEP + 1;
n = (adj + N2 - N1) / STEP;
trip = 0;
+ V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
+ here so that V is defined
+ if the loop is not entered
L0:
s0 = (trip * nthreads + threadid) * CHUNK;
e0 = min(s0 + CHUNK, n);
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
- tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
- tree trip, nthreads, threadid;
- tree type;
+ tree n, s0, e0, e, t, phi, nphi, args;
+ tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
+ tree type, cont, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb, cont_bb, fin_bb;
- tree list;
block_stmt_iterator si;
+ edge se, re, ene;
type = TREE_TYPE (fd->v);
entry_bb = region->entry;
- iter_part_bb = create_empty_bb (entry_bb);
- seq_start_bb = create_empty_bb (iter_part_bb);
- body_bb = single_succ (entry_bb);
+ se = split_block (entry_bb, last_stmt (entry_bb));
+ entry_bb = se->src;
+ iter_part_bb = se->dest;
cont_bb = region->cont;
- trip_update_bb = create_empty_bb (cont_bb);
- fin_bb = single_succ (cont_bb);
+ gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
+ gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
+ == FALLTHRU_EDGE (cont_bb)->dest);
+ seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
+ body_bb = single_succ (seq_start_bb);
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
+ trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
exit_bb = region->exit;
- l0 = tree_block_label (iter_part_bb);
- l1 = tree_block_label (seq_start_bb);
- l2 = tree_block_label (body_bb);
- l3 = tree_block_label (trip_update_bb);
- l4 = tree_block_label (fin_bb);
-
/* Trip and adjustment setup goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
-
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
-
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
-
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
-
- fd->chunk_size = fold_convert (type, fd->chunk_size);
- if (!is_gimple_val (fd->chunk_size))
- fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->chunk_size
+ = force_gimple_operand_bsi (&si, fold_convert (type,
+ fd->chunk_size),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
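+ /* The trip count is live across the back edge of the chunk loop,
+ so in SSA form it needs three names: the initial value, the value
+ at the head of the loop (a PHI result) and the incremented value. */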
+ trip_var = create_tmp_var (type, ".trip");
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (trip_var);
+ trip_init = make_ssa_name (trip_var, NULL_TREE);
+ trip_main = make_ssa_name (trip_var, NULL_TREE);
+ trip_back = make_ssa_name (trip_var, NULL_TREE);
+ }
else
- n = get_formal_tmp_var (t, &list);
+ {
+ trip_init = trip_var;
+ trip_main = trip_var;
+ trip_back = trip_var;
+ }
- t = build_int_cst (type, 0);
- trip = get_initialized_tmp_var (t, &list, NULL);
+ t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_init) = t;
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ /* Remove the OMP_FOR. */
bsi_remove (&si, true);
/* Iteration space partitioning goes in ITER_PART_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (iter_part_bb);
- t = build2 (MULT_EXPR, type, trip, nthreads);
- t = build2 (PLUS_EXPR, type, t, threadid);
- t = build2 (MULT_EXPR, type, t, fd->chunk_size);
- s0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
+ t = fold_build2 (PLUS_EXPR, type, t, threadid);
+ t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
- t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build2 (LT_EXPR, boolean_type_node, s0, n);
- t = build3 (COND_EXPR, void_type_node, t,
- build_and_jump (&l1), build_and_jump (&l4));
- append_to_statement_list (t, &list);
-
- si = bsi_start (iter_part_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop goes in CONT_BB,
replacing the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t,
- build_and_jump (&l2), build_and_jump (&l3));
- append_to_statement_list (t, &list);
-
si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ cont = bsi_stmt (si);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ v_main = TREE_OPERAND (cont, 1);
+ v_back = TREE_OPERAND (cont, 0);
+
+ t = build2 (PLUS_EXPR, type, v_main, fd->step);
+ t = build_gimple_modify_stmt (v_back, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (v_back) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, v_back, e);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+
+ /* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
/* Trip update code goes into TRIP_UPDATE_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (trip_update_bb);
t = build_int_cst (type, 1);
- t = build2 (PLUS_EXPR, type, trip, t);
- t = build_gimple_modify_stmt (trip, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (trip_update_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = build2 (PLUS_EXPR, type, trip_main, t);
+ t = build_gimple_modify_stmt (trip_back, t);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_back) = t;
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect the new blocks. */
- remove_edge (single_succ_edge (entry_bb));
- make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU);
+ find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
+ find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
- make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE);
- make_edge (iter_part_bb, fin_bb, EDGE_FALSE_VALUE);
+ find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
+ find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
- make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);
+ redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
- remove_edge (single_succ_edge (cont_bb));
- make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE);
- make_edge (cont_bb, trip_update_bb, EDGE_FALSE_VALUE);
+ if (gimple_in_ssa_p (cfun))
+ {
+ /* When we redirect the edge from trip_update_bb to iter_part_bb, we
+ remove arguments of the phi nodes in fin_bb. We need to create
+ appropriate phi nodes in iter_part_bb instead. */
+ se = single_pred_edge (fin_bb);
+ re = single_succ_edge (trip_update_bb);
+ ene = single_succ_edge (entry_bb);
+
+ args = PENDING_STMT (re);
+ PENDING_STMT (re) = NULL_TREE;
+ for (phi = phi_nodes (fin_bb);
+ phi && args;
+ phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
+ {
+ t = PHI_RESULT (phi);
+ gcc_assert (t == TREE_PURPOSE (args));
+ nphi = create_phi_node (t, iter_part_bb);
+ SSA_NAME_DEF_STMT (t) = nphi;
+
+ t = PHI_ARG_DEF_FROM_EDGE (phi, se);
+ /* A special case -- fd->v is not yet computed in iter_part_bb, so
+ we need to use v_extra instead. */
+ if (t == fd->v)
+ t = v_extra;
+ add_phi_arg (nphi, t, ene);
+ add_phi_arg (nphi, TREE_VALUE (args), re);
+ }
+ gcc_assert (!phi && !args);
+ while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
+ remove_phi_node (phi, NULL_TREE, false);
+
+ /* Make phi node for trip. */
+ phi = create_phi_node (trip_main, iter_part_bb);
+ SSA_NAME_DEF_STMT (trip_main) = phi;
+ add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
+ add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
+ }
- make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU);
+ set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
+ set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
+ recompute_dominator (CDI_DOMINATORS, iter_part_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
+ recompute_dominator (CDI_DOMINATORS, seq_start_bb));
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
}
{
struct omp_for_data fd;
- push_gimplify_context ();
-
extract_omp_for_data (last_stmt (region->entry), &fd);
region->sched_kind = fd.sched_kind;
+ gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
+ BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
+ FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
+ if (region->cont)
+ {
+ gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
+ BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
+ FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
+ }
+
if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
&& !fd.have_ordered
- && region->cont
- && region->exit)
+ && region->cont != NULL)
{
if (fd.chunk_size == NULL)
expand_omp_for_static_nochunk (region, &fd);
expand_omp_for_generic (region, &fd, start_ix, next_ix);
}
- pop_gimplify_context (NULL);
+ update_ssa (TODO_update_ssa_only_virtuals);
}
reduction;
If this is a combined parallel sections, replace the call to
- GOMP_sections_start with 'goto L1'. */
+ GOMP_sections_start with call to GOMP_sections_next. */
static void
expand_omp_sections (struct omp_region *region)
{
- tree label_vec, l0, l1, l2, t, u, v, sections_stmt;
- unsigned i, len;
- basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb, default_bb;
+ tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
+ unsigned i, casei, len;
+ basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
block_stmt_iterator si;
struct omp_region *inner;
- edge e;
+ bool exit_reachable = region->cont != NULL;
+ gcc_assert (exit_reachable == (region->exit != NULL));
entry_bb = region->entry;
- l0_bb = create_empty_bb (entry_bb);
- l0 = tree_block_label (l0_bb);
-
- gcc_assert ((region->cont != NULL) ^ (region->exit == NULL));
+ l0_bb = single_succ (entry_bb);
l1_bb = region->cont;
- if (l1_bb)
+ l2_bb = region->exit;
+ if (exit_reachable)
{
- l2_bb = single_succ (l1_bb);
+ gcc_assert (single_pred (l2_bb) == l0_bb);
default_bb = create_empty_bb (l1_bb->prev_bb);
-
l1 = tree_block_label (l1_bb);
+ l2 = tree_block_label (l2_bb);
}
else
{
- l2_bb = create_empty_bb (l0_bb);
- default_bb = l2_bb;
-
- l1 = NULL;
+ default_bb = create_empty_bb (l0_bb);
+ l1 = NULL_TREE;
+ l2 = tree_block_label (default_bb);
}
- l2 = tree_block_label (l2_bb);
-
- exit_bb = region->exit;
-
- v = create_tmp_var (unsigned_type_node, ".section");
/* We will build a switch() with enough cases for all the
OMP_SECTION regions, a '0' case to handle the end of more work
and a default case to abort if something goes wrong. */
- len = EDGE_COUNT (entry_bb->succs);
- label_vec = make_tree_vec (len + 2);
+ len = EDGE_COUNT (l0_bb->succs);
+ label_vec = make_tree_vec (len + 1);
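+ /* If the exit is reachable, one of the LEN edges out of L0_BB is
+ the edge to the exit block, matched by the '0' case; otherwise
+ all LEN edges lead to sections. Either way LEN + 1 slots are
+ enough, the extra slot holding the default case. */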
/* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
OMP_SECTIONS statement. */
si = bsi_last (entry_bb);
sections_stmt = bsi_stmt (si);
gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
+ vin = OMP_SECTIONS_CONTROL (sections_stmt);
if (!is_combined_parallel (region))
{
/* If we are not inside a combined parallel+sections region,
call GOMP_sections_start. */
- t = build_int_cst (unsigned_type_node, len);
+ t = build_int_cst (unsigned_type_node,
+ exit_reachable ? len - 1 : len);
u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
t = build_call_expr (u, 1, t);
- t = build_gimple_modify_stmt (v, t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
}
+ else
+ {
+ /* Otherwise, call GOMP_sections_next. */
+ u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
+ t = build_call_expr (u, 0);
+ }
+ t = build_gimple_modify_stmt (vin, t);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vin) = t;
bsi_remove (&si, true);
- /* The switch() statement replacing OMP_SECTIONS goes in L0_BB. */
- si = bsi_start (l0_bb);
+ /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
+ si = bsi_last (l0_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
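+ /* If the exit is reachable, the OMP_CONTINUE in L1_BB provides
+ both the value the switch dispatches on (VMAIN) and the variable
+ that receives the GOMP_sections_next result (VNEXT); otherwise
+ the GOMP_sections_start result VIN is used directly. */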
+ if (exit_reachable)
+ {
+ cont = last_stmt (l1_bb);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (cont, 1);
+ vnext = TREE_OPERAND (cont, 0);
+ }
+ else
+ {
+ vmain = vin;
+ vnext = NULL_TREE;
+ }
- t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
- bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ bsi_remove (&si, true);
- t = build3 (CASE_LABEL_EXPR, void_type_node,
- build_int_cst (unsigned_type_node, 0), NULL, l2);
- TREE_VEC_ELT (label_vec, 0) = t;
- make_edge (l0_bb, l2_bb, 0);
+ i = 0;
+ if (exit_reachable)
+ {
+ t = build3 (CASE_LABEL_EXPR, void_type_node,
+ build_int_cst (unsigned_type_node, 0), NULL, l2);
+ TREE_VEC_ELT (label_vec, 0) = t;
+ i++;
+ }
/* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
- for (inner = region->inner, i = 1; inner; inner = inner->next, ++i)
+ for (inner = region->inner, casei = 1;
+ inner;
+ inner = inner->next, i++, casei++)
{
basic_block s_entry_bb, s_exit_bb;
s_exit_bb = inner->exit;
t = tree_block_label (s_entry_bb);
- u = build_int_cst (unsigned_type_node, i);
+ u = build_int_cst (unsigned_type_node, casei);
u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
TREE_VEC_ELT (label_vec, i) = u;
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
bsi_remove (&si, true);
-
- e = single_pred_edge (s_entry_bb);
- e->flags = 0;
- redirect_edge_pred (e, l0_bb);
-
single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
if (s_exit_bb == NULL)
/* Error handling code goes in DEFAULT_BB. */
t = tree_block_label (default_bb);
u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
- TREE_VEC_ELT (label_vec, len + 1) = u;
+ TREE_VEC_ELT (label_vec, len) = u;
make_edge (l0_bb, default_bb, 0);
si = bsi_start (default_bb);
t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
- /* Code to get the next section goes in L1_BB. */
- if (l1_bb)
+ if (exit_reachable)
{
+ /* Code to get the next section goes in L1_BB. */
si = bsi_last (l1_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
- t = build_gimple_modify_stmt (v, t);
+ t = build_gimple_modify_stmt (vnext, t);
bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vnext) = t;
bsi_remove (&si, true);
- }
- /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
- if (exit_bb)
- {
- si = bsi_last (exit_bb);
+ single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
+
+ /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
+ si = bsi_last (l2_bb);
if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
else
bsi_remove (&si, true);
}
- /* Connect the new blocks. */
- if (is_combined_parallel (region))
- {
- /* If this was a combined parallel+sections region, we did not
- emit a GOMP_sections_start in the entry block, so we just
- need to jump to L1_BB to get the next section. */
- make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
- }
- else
- make_edge (entry_bb, l0_bb, EDGE_FALLTHRU);
-
- if (l1_bb)
- {
- e = single_succ_edge (l1_bb);
- redirect_edge_succ (e, l0_bb);
- e->flags = EDGE_FALLTHRU;
- }
+ set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
- {
- tree t = alloc_stmt_list ();
- build_omp_barrier (&t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
}
}
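For orientation (an illustrative sketch, not part of the patch; label and
variable names are placeholders), the control flow assembled by this hunk
corresponds roughly to:

        v = GOMP_sections_start (n);   /* entry_bb; omitted when the region
                                          is a combined parallel+sections  */
    L0: switch (v)                     /* l0_bb, replaces OMP_SECTIONS_SWITCH  */
          {
          case 0: goto L2;             /* emitted only if the exit is reachable  */
          case 1: /* section 1 body */ goto L1;
          /* ... one case per OMP_SECTION ...  */
          default: __builtin_trap ();  /* default_bb  */
          }
    L1: v = GOMP_sections_next ();     /* l1_bb, replaces OMP_CONTINUE  */
        goto L0;
    L2: GOMP_sections_end ();          /* replaces OMP_RETURN; the _nowait
                                          variant when no barrier is needed  */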
+/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
+ operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
+ size of the data type, and thus usable to find the index of the builtin
+ decl. Returns false if the expression is not of the proper form. */
-/* Expand the parallel region tree rooted at REGION. Expansion
- proceeds in depth-first order. Innermost regions are expanded
+static bool
+expand_omp_atomic_fetch_op (basic_block load_bb,
+ tree addr, tree loaded_val,
+ tree stored_val, int index)
+{
+ enum built_in_function base;
+ tree decl, itype, call;
+ enum insn_code *optab;
+ tree rhs;
+ basic_block store_bb = single_succ (load_bb);
+ block_stmt_iterator bsi;
+ tree stmt;
+
+ /* We expect to find the following sequences:
+
+ load_bb:
+ OMP_ATOMIC_LOAD (tmp, mem)
+
+ store_bb:
+ val = tmp OP something; (or: something OP tmp)
+ OMP_ATOMIC_STORE (val)
+
+ ??? FIXME: Allow a more flexible sequence.
+ Perhaps use data flow to pick the statements. */
+
+ bsi = bsi_after_labels (store_bb);
+ stmt = bsi_stmt (bsi);
+ if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
+ return false;
+ bsi_next (&bsi);
+ if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
+ return false;
+
+ if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
+ return false;
+
+ rhs = GIMPLE_STMT_OPERAND (stmt, 1);
+
+ /* Check for one of the supported fetch-op operations. */
+ switch (TREE_CODE (rhs))
+ {
+ case PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
+ base = BUILT_IN_FETCH_AND_ADD_N;
+ optab = sync_add_optab;
+ break;
+ case MINUS_EXPR:
+ base = BUILT_IN_FETCH_AND_SUB_N;
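+ /* Checking the add optab here appears intentional: a target with no
+ direct sub pattern can expand fetch-and-sub as fetch-and-add of
+ the negated value. */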
+ optab = sync_add_optab;
+ break;
+ case BIT_AND_EXPR:
+ base = BUILT_IN_FETCH_AND_AND_N;
+ optab = sync_and_optab;
+ break;
+ case BIT_IOR_EXPR:
+ base = BUILT_IN_FETCH_AND_OR_N;
+ optab = sync_ior_optab;
+ break;
+ case BIT_XOR_EXPR:
+ base = BUILT_IN_FETCH_AND_XOR_N;
+ optab = sync_xor_optab;
+ break;
+ default:
+ return false;
+ }
+ /* Make sure the expression is of the proper form. */
+ if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
+ rhs = TREE_OPERAND (rhs, 1);
+ else if (commutative_tree_code (TREE_CODE (rhs))
+ && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
+ rhs = TREE_OPERAND (rhs, 0);
+ else
+ return false;
+
+ decl = built_in_decls[base + index + 1];
+ itype = TREE_TYPE (TREE_TYPE (decl));
+
+ if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
+ return false;
+
+ bsi = bsi_last (load_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
+ call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
+ force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
+ bsi_remove (&bsi, true);
+
+ bsi = bsi_last (store_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
+ bsi_remove (&bsi, true);
+ bsi = bsi_last (store_bb);
+ bsi_remove (&bsi, true);
+
+ if (gimple_in_ssa_p (cfun))
+ update_ssa (TODO_update_ssa_no_phi);
+
+ return true;
+}
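As a concrete illustration (not part of the patch; `x' and `n' are
placeholders), a supported update maps directly onto the matching __sync
builtin.  For a 4-byte int, INDEX is 2, so built_in_decls[base + index + 1]
selects the _4 variant:

    int x;

    void
    f (int n)
    {
    #pragma omp atomic
      x += n;
      /* ... is expanded, conceptually, to:
         __sync_fetch_and_add (&x, n);  */
    }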
+
+/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
+
+ oldval = *addr;
+ repeat:
+ newval = rhs; // with oldval replacing *addr in rhs
+ oldval = __sync_val_compare_and_swap (addr, oldval, newval);
+ if (oldval != newval)
+ goto repeat;
+
+ INDEX is log2 of the size of the data type, and thus usable to find the
+ index of the builtin decl. */
+
+static bool
+expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
+ tree addr, tree loaded_val, tree stored_val,
+ int index)
+{
+ tree loadedi, storedi, initial, new_storedi, old_vali;
+ tree type, itype, cmpxchg, iaddr;
+ block_stmt_iterator bsi;
+ basic_block loop_header = single_succ (load_bb);
+ tree phi, x;
+ edge e;
+
+ cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
+ type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
+ itype = TREE_TYPE (TREE_TYPE (cmpxchg));
+
+ if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
+ return false;
+
+ /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
+ bsi = bsi_last (load_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
+ /* For floating-point values, we'll need to view-convert them to integers
+ so that we can perform the atomic compare and swap. Simplify the
+ following code by always setting up the "i"ntegral variables. */
+ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
+ {
+ iaddr = create_tmp_var (build_pointer_type (itype), NULL);
+ x = build_gimple_modify_stmt (iaddr,
+ fold_convert (TREE_TYPE (iaddr), addr));
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ DECL_NO_TBAA_P (iaddr) = 1;
+ DECL_POINTER_ALIAS_SET (iaddr) = 0;
+ loadedi = create_tmp_var (itype, NULL);
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (iaddr);
+ add_referenced_var (loadedi);
+ loadedi = make_ssa_name (loadedi, NULL);
+ }
+ }
+ else
+ {
+ iaddr = addr;
+ loadedi = loaded_val;
+ }
+ initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (iaddr),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+
+ /* Move the value to the LOADEDI temporary. */
+ if (gimple_in_ssa_p (cfun))
+ {
+ gcc_assert (phi_nodes (loop_header) == NULL_TREE);
+ phi = create_phi_node (loadedi, loop_header);
+ SSA_NAME_DEF_STMT (loadedi) = phi;
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
+ initial);
+ }
+ else
+ bsi_insert_before (&bsi,
+ build_gimple_modify_stmt (loadedi, initial),
+ BSI_SAME_STMT);
+ if (loadedi != loaded_val)
+ {
+ block_stmt_iterator bsi2;
+
+ x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
+ bsi2 = bsi_start (loop_header);
+ if (gimple_in_ssa_p (cfun))
+ {
+ x = force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ x = build_gimple_modify_stmt (loaded_val, x);
+ bsi_insert_before (&bsi2, x, BSI_SAME_STMT);
+ SSA_NAME_DEF_STMT (loaded_val) = x;
+ }
+ else
+ {
+ x = build_gimple_modify_stmt (loaded_val, x);
+ force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ }
+ }
+ bsi_remove (&bsi, true);
+
+ bsi = bsi_last (store_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
+
+ if (iaddr == addr)
+ storedi = stored_val;
+ else
+ storedi =
+ force_gimple_operand_bsi (&bsi,
+ build1 (VIEW_CONVERT_EXPR, itype,
+ stored_val), true, NULL_TREE, true,
+ BSI_SAME_STMT);
+
+ /* Build the compare&swap statement. */
+ new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
+ new_storedi = force_gimple_operand_bsi (&bsi,
+ fold_convert (itype, new_storedi),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ if (gimple_in_ssa_p (cfun))
+ old_vali = loadedi;
+ else
+ {
+ old_vali = create_tmp_var (itype, NULL);
+ x = build_gimple_modify_stmt (old_vali, loadedi);
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ x = build_gimple_modify_stmt (loadedi, new_storedi);
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ }
+
+ /* Note that we always perform the comparison as an integer, even for
+ floating point. This allows the atomic operation to properly
+ succeed even with NaNs and -0.0. */
+ x = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
+ x = build3 (COND_EXPR, void_type_node, x, NULL_TREE, NULL_TREE);
+ bsi_insert_before (&bsi, x, BSI_SAME_STMT);
+
+ /* Update cfg. */
+ e = single_succ_edge (store_bb);
+ e->flags &= ~EDGE_FALLTHRU;
+ e->flags |= EDGE_FALSE_VALUE;
+
+ e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
+
+ /* Copy the new value to loadedi (we already did that before the condition
+ if we are not in SSA). */
+ if (gimple_in_ssa_p (cfun))
+ {
+ phi = phi_nodes (loop_header);
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
+ }
+
+ /* Remove OMP_ATOMIC_STORE. */
+ bsi_remove (&bsi, true);
+
+ if (gimple_in_ssa_p (cfun))
+ update_ssa (TODO_update_ssa_no_phi);
+
+ return true;
+}
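Hand-written equivalent of the loop built above (a sketch, assuming a
4-byte float and an atomic update adding RHS to *ADDR; the real code works
on the view-converted integer image, modelled here with memcpy):

    #include <string.h>

    void
    atomic_add_float (float *addr, float rhs)
    {
      unsigned int old, newi, cur;
      float tmp;

      memcpy (&cur, addr, sizeof cur);          /* initial load (load_bb)  */
      do
        {
          old = cur;
          memcpy (&tmp, &old, sizeof tmp);      /* VIEW_CONVERT_EXPR to float  */
          tmp += rhs;                           /* statements of the region body  */
          memcpy (&newi, &tmp, sizeof newi);    /* VIEW_CONVERT_EXPR back  */
          cur = __sync_val_compare_and_swap ((unsigned int *) addr, old, newi);
        }
      while (cur != old);                       /* the NE_EXPR back edge  */
    }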
+
+/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
+
+ GOMP_atomic_start ();
+ *addr = rhs;
+ GOMP_atomic_end ();
+
+ The result is not globally atomic, but works so long as all parallel
+ references are within #pragma omp atomic directives. According to
+ responses received from omp@openmp.org, this appears to be within
+ spec, and it matches how several other compilers handle the same
+ situation.
+ LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
+ STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
+
+ We replace
+ OMP_ATOMIC_LOAD (loaded_val, addr) with
+ loaded_val = *addr;
+
+ and replace
+ OMP_ATOMIC_STORE (stored_val) with
+ *addr = stored_val;
+*/
+
+static bool
+expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
+ tree addr, tree loaded_val, tree stored_val)
+{
+ block_stmt_iterator bsi;
+ tree t;
+
+ bsi = bsi_last (load_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
+
+ t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
+ t = build_function_call_expr (t, 0);
+ force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
+
+ t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (loaded_val) = t;
+ bsi_insert_before (&bsi, t, BSI_SAME_STMT);
+ bsi_remove (&bsi, true);
+
+ bsi = bsi_last (store_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
+
+ t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
+ stored_val);
+ bsi_insert_before (&bsi, t, BSI_SAME_STMT);
+
+ t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
+ t = build_function_call_expr (t, 0);
+ force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
+ bsi_remove (&bsi, true);
+
+ if (gimple_in_ssa_p (cfun))
+ update_ssa (TODO_update_ssa_no_phi);
+ return true;
+}
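In source terms the fallback is equivalent to the following (illustrative;
`ld' and `x' are placeholders, GOMP_atomic_start/GOMP_atomic_end are the
libgomp entry points used above):

    extern void GOMP_atomic_start (void);
    extern void GOMP_atomic_end (void);

    long double ld;               /* no __sync support at this size  */

    void
    f (long double x)
    {
      GOMP_atomic_start ();
      ld = ld + x;                /* loaded_val = *addr; *addr = stored_val;  */
      GOMP_atomic_end ();
    }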
+
+/* Expand an OMP_ATOMIC statement. We first try to expand it with
+ expand_omp_atomic_fetch_op. If that fails, we try
+ expand_omp_atomic_pipeline, and if that fails too, the ultimate
+ fallback is wrapping the operation in a mutex
+ (expand_omp_atomic_mutex). REGION is the atomic region built
+ by build_omp_regions_1(). */
+
+static void
+expand_omp_atomic (struct omp_region *region)
+{
+ basic_block load_bb = region->entry, store_bb = region->exit;
+ tree load = last_stmt (load_bb), store = last_stmt (store_bb);
+ tree loaded_val = TREE_OPERAND (load, 0);
+ tree addr = TREE_OPERAND (load, 1);
+ tree stored_val = TREE_OPERAND (store, 0);
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
+ HOST_WIDE_INT index;
+
+ /* Make sure the type is one of the supported sizes. */
+ index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
+ index = exact_log2 (index);
+ if (index >= 0 && index <= 4)
+ {
+ unsigned int align = TYPE_ALIGN_UNIT (type);
+
+ /* __sync builtins require strict data alignment. */
+ if (exact_log2 (align) >= index)
+ {
+ /* When possible, use specialized atomic update functions. */
+ if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
+ && store_bb == single_succ (load_bb))
+ {
+ if (expand_omp_atomic_fetch_op (load_bb, addr,
+ loaded_val, stored_val, index))
+ return;
+ }
+
+ /* If we don't have specialized __sync builtins, try to implement
+ it as a compare-and-swap loop. */
+ if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
+ loaded_val, stored_val, index))
+ return;
+ }
+ }
+
+ /* The ultimate fallback is wrapping the operation in a mutex. */
+ expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
+}
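A few worked instances of the gating above (sizes as on ia32; illustrative,
not exhaustive):

    type          size  index  aligned?  path taken
    int              4      2  yes       fetch_op (__sync_fetch_and_add_4 etc.)
    double           8      3  yes       pipeline (__sync_val_compare_and_swap_8)
    long double     12     -1  --        mutex; 12 is not a power of two

A negative or too-large index, or insufficient alignment, likewise falls
through to the GOMP_atomic_start/end fallback.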
+
+/* Expand the parallel region tree rooted at REGION. Expansion
+ proceeds in depth-first order. Innermost regions are expanded
first. This way, parallel regions that require a new function to
be created (e.g., OMP_PARALLEL) can be expanded without having any
internal dependencies in their body. */
{
while (region)
{
+ /* First, determine whether this is a combined parallel+workshare
+ region. */
+ if (region->type == OMP_PARALLEL)
+ determine_parallel_type (region);
+
if (region->inner)
expand_omp (region->inner);
expand_omp_synch (region);
break;
+ case OMP_ATOMIC_LOAD:
+ expand_omp_atomic (region);
+ break;
+
default:
gcc_unreachable ();
}
/* Helper for build_omp_regions. Scan the dominator tree starting at
- block BB. PARENT is the region that contains BB. */
+ block BB. PARENT is the region that contains BB. If SINGLE_TREE is
+ true, the function ends once a single tree is built (otherwise, the
+ whole forest of OMP constructs may be built). */
static void
-build_omp_regions_1 (basic_block bb, struct omp_region *parent)
+build_omp_regions_1 (basic_block bb, struct omp_region *parent,
+ bool single_tree)
{
block_stmt_iterator si;
tree stmt;
stmt = bsi_stmt (si);
code = TREE_CODE (stmt);
-
if (code == OMP_RETURN)
{
/* STMT is the return point out of region PARENT. Mark it
region = parent;
region->exit = bb;
parent = parent->outer;
-
- /* If REGION is a parallel region, determine whether it is
- a combined parallel+workshare region. */
- if (region->type == OMP_PARALLEL)
- determine_parallel_type (region);
}
+ else if (code == OMP_ATOMIC_STORE)
+ {
+ /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
+ OMP_ATOMIC_LOAD. */
+ gcc_assert (parent);
+ gcc_assert (parent->type == OMP_ATOMIC_LOAD);
+ region = parent;
+ region->exit = bb;
+ parent = parent->outer;
+ }
else if (code == OMP_CONTINUE)
{
gcc_assert (parent);
parent->cont = bb;
}
+ else if (code == OMP_SECTIONS_SWITCH)
+ {
+ /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
+ it. */ ;
+ }
else
{
/* Otherwise, this directive becomes the parent for a new
}
}
+ if (single_tree && !parent)
+ return;
+
for (son = first_dom_son (CDI_DOMINATORS, bb);
son;
son = next_dom_son (CDI_DOMINATORS, son))
- build_omp_regions_1 (son, parent);
+ build_omp_regions_1 (son, parent, single_tree);
}
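For intuition (an illustrative sketch; `a', `s', `i' and `n' are
placeholders): each directive opens a new region, OMP_RETURN closes the
innermost open one, OMP_CONTINUE records its continue point, and the new
OMP_ATOMIC_STORE closes a matching OMP_ATOMIC_LOAD region, so

    #pragma omp parallel            /* root region: OMP_PARALLEL      */
      {
    #pragma omp for                 /* inner region: OMP_FOR          */
        for (i = 0; i < n; i++)     /* OMP_CONTINUE -> region->cont   */
    #pragma omp atomic              /* innermost: OMP_ATOMIC_LOAD,    */
          s += a[i];                /*   closed by OMP_ATOMIC_STORE   */
      }                             /* OMP_RETURNs -> region->exit    */

produces a three-level region tree rooted at the parallel region.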
+/* Builds the tree of OMP regions rooted at ROOT, storing it to
+ root_omp_region. */
+
+static void
+build_omp_regions_root (basic_block root)
+{
+ gcc_assert (root_omp_region == NULL);
+ build_omp_regions_1 (root, NULL, true);
+ gcc_assert (root_omp_region != NULL);
+}
+
+/* Expands the OMP construct (and its subconstructs) starting in HEAD. */
+
+void
+omp_expand_local (basic_block head)
+{
+ build_omp_regions_root (head);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "\nOMP region tree\n\n");
+ dump_omp_region (dump_file, root_omp_region, 0);
+ fprintf (dump_file, "\n");
+ }
+
+ remove_exit_barriers (root_omp_region);
+ expand_omp (root_omp_region);
+
+ free_omp_regions ();
+}
/* Scan the CFG and build a tree of OMP regions. Return the root of
the OMP region tree. */
{
gcc_assert (root_omp_region == NULL);
calculate_dominance_info (CDI_DOMINATORS);
- build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL);
+ build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
expand_omp (root_omp_region);
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
cleanup_tree_cfg ();
free_omp_regions ();
return 0;
}
+/* OMP expansion -- the default pass, run before creation of SSA form. */
+
static bool
gate_expand_omp (void)
{
- return flag_openmp != 0 && errorcount == 0;
+ return (flag_openmp != 0 && errorcount == 0);
}
-struct tree_opt_pass pass_expand_omp =
+struct gimple_opt_pass pass_expand_omp =
{
+ {
+ GIMPLE_PASS,
"ompexp", /* name */
gate_expand_omp, /* gate */
execute_expand_omp, /* execute */
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func /* todo_flags_finish */
+ }
};
\f
/* Routines to lower OpenMP directives into OMP-GIMPLE. */
static void
lower_omp_sections (tree *stmt_p, omp_context *ctx)
{
- tree new_stmt, stmt, body, bind, block, ilist, olist, new_body;
+ tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
tree t, dlist;
tree_stmt_iterator tsi;
unsigned i, len;
new_body = alloc_stmt_list ();
append_to_statement_list (ilist, &new_body);
append_to_statement_list (stmt, &new_body);
+ append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
append_to_statement_list (bind, &new_body);
- t = make_node (OMP_CONTINUE);
+ control = create_tmp_var (unsigned_type_node, ".section");
+ t = build2 (OMP_CONTINUE, void_type_node, control, control);
+ OMP_SECTIONS_CONTROL (stmt) = control;
append_to_statement_list (t, &new_body);
append_to_statement_list (olist, &new_body);
append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
- t = make_node (OMP_CONTINUE);
+ t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
append_to_statement_list (t, body_p);
/* After the loop, add exit clauses. */
*stmt_p = new_stmt;
}
+/* Callback for walk_stmts. Check if *TP only contains OMP_FOR
+ or OMP_SECTIONS. */
+
+static tree
+check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
+{
+ struct walk_stmt_info *wi = data;
+ int *info = wi->info;
+
+ *walk_subtrees = 0;
+ switch (TREE_CODE (*tp))
+ {
+ case OMP_FOR:
+ case OMP_SECTIONS:
+ *info = *info == 0 ? 1 : -1;
+ break;
+ default:
+ *info = -1;
+ break;
+ }
+ return NULL;
+}
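The effect is to recognize the hand-expanded form of a combined directive
(sketch; `a', `b', `i', `n' and init are placeholders):

    /* Recognized as combined -- ws_num ends up 1:  */
    #pragma omp parallel
    #pragma omp for
      for (i = 0; i < n; i++)
        a[i] = b[i];

    /* Not combined -- the extra statement drives ws_num to -1:  */
    #pragma omp parallel
      {
        init ();
    #pragma omp for
        for (i = 0; i < n; i++)
          a[i] = b[i];
      }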
/* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
information for the directive. */
par_bind = OMP_PARALLEL_BODY (stmt);
par_body = BIND_EXPR_BODY (par_bind);
child_fn = ctx->cb.dst_fn;
+ if (!OMP_PARALLEL_COMBINED (stmt))
+ {
+ struct walk_stmt_info wi;
+ int ws_num = 0;
+
+ memset (&wi, 0, sizeof (wi));
+ wi.callback = check_combined_parallel;
+ wi.info = &ws_num;
+ wi.val_only = true;
+ walk_stmts (&wi, &par_bind);
+ if (ws_num == 1)
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ }
push_gimplify_context ();
pop_gimplify_context (NULL_TREE);
}
-
-/* Pass *TP back through the gimplifier within the context determined by WI.
- This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
- flags on ADDR_EXPR. */
-
-static void
-lower_regimplify (tree *tp, struct walk_stmt_info *wi)
-{
- enum gimplify_status gs;
- tree pre = NULL;
-
- if (wi->is_lhs)
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
- else if (wi->val_only)
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
- else
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
- gcc_assert (gs == GS_ALL_DONE);
-
- if (pre)
- tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
-}
-
-/* Copy EXP into a temporary. Insert the initialization statement before TSI. */
+/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
+ regimplified. */
static tree
-init_tmp_var (tree exp, tree_stmt_iterator *tsi)
+lower_omp_2 (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
- tree t, stmt;
-
- t = create_tmp_var (TREE_TYPE (exp), NULL);
- DECL_GIMPLE_REG_P (t) = 1;
- stmt = build_gimple_modify_stmt (t, exp);
- SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
- tsi_link_before (tsi, stmt, TSI_SAME_STMT);
-
- return t;
-}
-
-/* Similarly, but copy from the temporary and insert the statement
- after the iterator. */
+ tree t = *tp;
-static tree
-save_tmp_var (tree exp, tree_stmt_iterator *tsi)
-{
- tree t, stmt;
+ /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
+ if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
+ return t;
- t = create_tmp_var (TREE_TYPE (exp), NULL);
- DECL_GIMPLE_REG_P (t) = 1;
- stmt = build_gimple_modify_stmt (exp, t);
- SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
- tsi_link_after (tsi, stmt, TSI_SAME_STMT);
+ /* If a global variable has been privatized, TREE_CONSTANT on
+ ADDR_EXPR might be wrong. */
+ if (TREE_CODE (t) == ADDR_EXPR)
+ recompute_tree_invariant_for_addr_expr (t);
- return t;
+ *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
+ return NULL_TREE;
}
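Why the first check matters (an illustrative sketch; `g' and `p' are
placeholders): once a variable is privatized, uses of it are rewritten
through DECL_VALUE_EXPR, so a previously gimple-valid reference may need
another gimplification pass, and a once-constant address may stop being
constant:

    int g;

    void
    f (void)
    {
    #pragma omp parallel firstprivate (g)
      {
        int *p = &g;    /* Inside the body, g stands (via DECL_VALUE_EXPR)
                           for its private copy, so this ADDR_EXPR is no
                           longer TREE_CONSTANT and must be regimplified.  */
        *p += 1;
      }
    }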
-/* Callback for walk_stmts. Lower the OpenMP directive pointed by TP. */
-
-static tree
-lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
+static void
+lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
{
- struct walk_stmt_info *wi = data;
- omp_context *ctx = wi->info;
tree t = *tp;
+ if (!t)
+ return;
+
+ if (EXPR_HAS_LOCATION (t))
+ input_location = EXPR_LOCATION (t);
+
/* If we have issued syntax errors, avoid doing any heavy lifting.
Just replace the OpenMP directives with a NOP to avoid
confusing RTL expansion. */
- if (errorcount && OMP_DIRECTIVE_P (*tp))
+ if (errorcount && OMP_DIRECTIVE_P (t))
{
*tp = build_empty_stmt ();
- return NULL_TREE;
+ return;
}
- *walk_subtrees = 0;
- switch (TREE_CODE (*tp))
+ switch (TREE_CODE (t))
{
+ case STATEMENT_LIST:
+ {
+ tree_stmt_iterator i;
+ for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
+ lower_omp_1 (tsi_stmt_ptr (i), ctx, &i);
+ }
+ break;
+
+ case COND_EXPR:
+ lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
+ lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
+ if (ctx
+ && walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
+ {
+ tree pre = NULL;
+ gimplify_expr (&COND_EXPR_COND (t), &pre, NULL,
+ is_gimple_condexpr, fb_rvalue);
+ if (pre)
+ {
+ if (tsi)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ else
+ {
+ append_to_statement_list (t, &pre);
+ *tp = pre;
+ }
+ }
+ }
+ break;
+ case CATCH_EXPR:
+ lower_omp_1 (&CATCH_BODY (t), ctx, NULL);
+ break;
+ case EH_FILTER_EXPR:
+ lower_omp_1 (&EH_FILTER_FAILURE (t), ctx, NULL);
+ break;
+ case TRY_CATCH_EXPR:
+ case TRY_FINALLY_EXPR:
+ lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
+ lower_omp_1 (&TREE_OPERAND (t, 1), ctx, NULL);
+ break;
+ case BIND_EXPR:
+ lower_omp_1 (&BIND_EXPR_BODY (t), ctx, NULL);
+ break;
+ case RETURN_EXPR:
+ lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
+ break;
+
case OMP_PARALLEL:
ctx = maybe_lookup_ctx (t);
lower_omp_parallel (tp, ctx);
break;
-
case OMP_FOR:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_for (tp, ctx);
break;
-
case OMP_SECTIONS:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_sections (tp, ctx);
break;
-
case OMP_SINGLE:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_single (tp, ctx);
break;
-
case OMP_MASTER:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_master (tp, ctx);
break;
-
case OMP_ORDERED:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_ordered (tp, ctx);
break;
-
case OMP_CRITICAL:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_critical (tp, ctx);
break;
- case VAR_DECL:
- if (ctx && DECL_HAS_VALUE_EXPR_P (t))
+ default:
+ if (ctx && walk_tree (tp, lower_omp_2, ctx, NULL))
{
- lower_regimplify (&t, wi);
- if (wi->val_only)
+ /* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
+ Handle that here. */
+ tree call = get_call_expr_in (t);
+ if (call
+ && CALL_EXPR_STATIC_CHAIN (call)
+ && walk_tree (&CALL_EXPR_STATIC_CHAIN (call), lower_omp_2,
+ ctx, NULL))
{
- if (wi->is_lhs)
- t = save_tmp_var (t, &wi->tsi);
- else
- t = init_tmp_var (t, &wi->tsi);
+ tree pre = NULL;
+ gimplify_expr (&CALL_EXPR_STATIC_CHAIN (call), &pre, NULL,
+ is_gimple_val, fb_rvalue);
+ if (pre)
+ {
+ if (tsi)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ else
+ {
+ append_to_statement_list (t, &pre);
+ lower_omp_1 (&pre, ctx, NULL);
+ *tp = pre;
+ return;
+ }
+ }
}
- *tp = t;
- }
- break;
- case ADDR_EXPR:
- if (ctx)
- lower_regimplify (tp, wi);
- break;
-
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- case REALPART_EXPR:
- case IMAGPART_EXPR:
- case COMPONENT_REF:
- case VIEW_CONVERT_EXPR:
- if (ctx)
- lower_regimplify (tp, wi);
- break;
-
- case INDIRECT_REF:
- if (ctx)
- {
- wi->is_lhs = false;
- wi->val_only = true;
- lower_regimplify (&TREE_OPERAND (t, 0), wi);
+ if (tsi == NULL)
+ gimplify_stmt (tp);
+ else
+ {
+ tree pre = NULL;
+ gimplify_expr (tp, &pre, NULL, is_gimple_stmt, fb_none);
+ if (pre)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ }
}
break;
-
- default:
- if (!TYPE_P (t) && !DECL_P (t))
- *walk_subtrees = 1;
- break;
}
-
- return NULL_TREE;
}
static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
- struct walk_stmt_info wi;
-
- memset (&wi, 0, sizeof (wi));
- wi.callback = lower_omp_1;
- wi.info = ctx;
- wi.val_only = true;
- wi.want_locations = true;
-
- walk_stmts (&wi, stmt_p);
+ lower_omp_1 (stmt_p, ctx, NULL);
}
\f
/* Main entry point. */
return flag_openmp != 0;
}
-struct tree_opt_pass pass_lower_omp =
+struct gimple_opt_pass pass_lower_omp =
{
+ {
+ GIMPLE_PASS,
"omplower", /* name */
gate_lower_omp, /* gate */
execute_lower_omp, /* execute */
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func /* todo_flags_finish */
+ }
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.