}
static inline tree
-maybe_lookup_decl (tree var, omp_context *ctx)
+maybe_lookup_decl (const_tree var, omp_context *ctx)
{
tree *n;
n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
return n ? *n : NULL_TREE;
}
-/* Return true if DECL should be copied by pointer. SHARED_P is true
- if DECL is to be shared. */
+/* Return true if DECL should be copied by pointer. SHARED_CTX is
+ the parallel context if DECL is to be shared. */
static bool
-use_pointer_for_field (const_tree decl, bool shared_p)
+use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
/* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
- if (shared_p)
+ if (shared_ctx)
{
/* ??? Trivially accessible from anywhere. But why would we even
be passing an address in this case? Should we simply assert
address taken. */
if (TREE_ADDRESSABLE (decl))
return true;
+
+ /* Disallow copy-in/out in nested parallel if
+ decl is shared in outer parallel, otherwise
+ each thread could store the shared variable
+ in its own copy-in location, making the
+ variable no longer really shared. */
+ if (!TREE_READONLY (decl) && shared_ctx->is_nested)
+ {
+ omp_context *up;
+
+ for (up = shared_ctx->outer; up; up = up->outer)
+ if (maybe_lookup_decl (decl, up))
+ break;
+
+ if (up && is_parallel_ctx (up))
+ {
+ tree c;
+
+ for (c = OMP_PARALLEL_CLAUSES (up->stmt);
+ c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ && OMP_CLAUSE_DECL (c) == decl)
+ break;
+
+ if (c)
+ return true;
+ }
+ }
}
return false;
}
else if (is_parallel_ctx (ctx))
{
- bool by_ref = use_pointer_for_field (var, false);
+ bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
}
else if (ctx->outer)
case OMP_CLAUSE_SHARED:
gcc_assert (is_parallel_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
- gcc_assert (!is_variable_sized (decl));
- by_ref = use_pointer_for_field (decl, true);
+ gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
+ || !is_variable_sized (decl));
+ by_ref = use_pointer_for_field (decl, ctx);
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
&& ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx)))
{
- by_ref = use_pointer_for_field (decl, false);
+ by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, ctx);
}
install_var_local (decl, ctx);
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
- by_ref = use_pointer_for_field (decl, false);
+ by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, ctx);
break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
- by_ref = use_pointer_for_field (var, true);
+ by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
break;
case OMP_CLAUSE_COPYIN:
- by_ref = use_pointer_for_field (var, false);
+ by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
continue;
var = OMP_CLAUSE_DECL (c);
- by_ref = use_pointer_for_field (var, false);
+ by_ref = use_pointer_for_field (var, NULL);
ref = build_sender_ref (var, ctx);
x = lookup_decl_in_outer_ctx (var, ctx);
continue;
if (is_variable_sized (val))
continue;
- by_ref = use_pointer_for_field (val, false);
+ by_ref = use_pointer_for_field (val, NULL);
switch (OMP_CLAUSE_CODE (c))
{
mapping for OVAR. */
var = lookup_decl_in_outer_ctx (ovar, ctx);
- if (use_pointer_for_field (ovar, true))
+ if (use_pointer_for_field (ovar, ctx))
{
x = build_sender_ref (ovar, ctx);
var = build_fold_addr_expr (var);
/* Declare local variables needed in CHILD_CFUN. */
block = DECL_INITIAL (child_fn);
- BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
+ BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
/* Reset DECL_CONTEXT on function arguments. */
if (gimple_in_ssa_p (cfun))
{
push_cfun (child_cfun);
- init_tree_ssa ();
+ init_tree_ssa (child_cfun);
init_ssa_operands ();
cfun->gimple_df->in_ssa_p = true;
pop_cfun ();
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
bsi_insert_after (&si, t, BSI_SAME_STMT);
- /* V may be used outside of the loop (e.g., to handle lastprivate clause).
- If this is the case, its value is undefined if the loop is not entered
- at all. To handle this case, set its initial value to N1. */
- if (gimple_in_ssa_p (cfun))
- {
- e = find_edge (entry_bb, l3_bb);
- for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
- if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
- SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
- }
- else
- {
- t = build_gimple_modify_stmt (fd->v, fd->n1);
- bsi_insert_before (&si, t, BSI_SAME_STMT);
- }
-
/* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
t = fold_build2 (MIN_EXPR, type, t, n);
e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = fold_convert (type, s0);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
- t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
- true, BSI_SAME_STMT);
- t = build_gimple_modify_stmt (fd->v, t);
- bsi_insert_before (&si, t, BSI_SAME_STMT);
- if (gimple_in_ssa_p (cfun))
- SSA_NAME_DEF_STMT (fd->v) = t;
-
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
bsi_insert_before (&si, t, BSI_SAME_STMT);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
si = bsi_start (seq_start_bb);
+ t = fold_convert (type, s0);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ t = build_gimple_modify_stmt (fd->v, t);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
+
t = fold_convert (type, e0);
t = fold_build2 (MULT_EXPR, type, t, fd->step);
t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
tree addr, tree loaded_val, tree stored_val,
int index)
{
- tree loadedi, storedi, initial, new_stored, new_storedi, old_vali;
+ tree loadedi, storedi, initial, new_storedi, old_vali;
tree type, itype, cmpxchg, iaddr;
block_stmt_iterator bsi;
basic_block loop_header = single_succ (load_bb);
/* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
bsi = bsi_last (load_bb);
gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
- initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (addr),
+ /* For floating-point values, we'll need to view-convert them to integers
+ so that we can perform the atomic compare and swap. Simplify the
+ following code by always setting up the "i"ntegral variables. */
+ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
+ {
+ iaddr = create_tmp_var (build_pointer_type (itype), NULL);
+ x = build_gimple_modify_stmt (iaddr,
+ fold_convert (TREE_TYPE (iaddr), addr));
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ DECL_NO_TBAA_P (iaddr) = 1;
+ DECL_POINTER_ALIAS_SET (iaddr) = 0;
+ loadedi = create_tmp_var (itype, NULL);
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (iaddr);
+ add_referenced_var (loadedi);
+ loadedi = make_ssa_name (loadedi, NULL);
+ }
+ }
+ else
+ {
+ iaddr = addr;
+ loadedi = loaded_val;
+ }
+ initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (iaddr),
true, NULL_TREE, true, BSI_SAME_STMT);
- /* Move the value to the LOADED_VAL temporary. */
+
+ /* Move the value to the LOADEDI temporary. */
if (gimple_in_ssa_p (cfun))
{
gcc_assert (phi_nodes (loop_header) == NULL_TREE);
- phi = create_phi_node (loaded_val, loop_header);
- SSA_NAME_DEF_STMT (loaded_val) = phi;
+ phi = create_phi_node (loadedi, loop_header);
+ SSA_NAME_DEF_STMT (loadedi) = phi;
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
initial);
}
else
bsi_insert_before (&bsi,
- build_gimple_modify_stmt (loaded_val, initial),
+ build_gimple_modify_stmt (loadedi, initial),
BSI_SAME_STMT);
+ if (loadedi != loaded_val)
+ {
+ block_stmt_iterator bsi2;
+
+ x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
+ bsi2 = bsi_start (loop_header);
+ if (gimple_in_ssa_p (cfun))
+ {
+ x = force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ x = build_gimple_modify_stmt (loaded_val, x);
+ bsi_insert_before (&bsi2, x, BSI_SAME_STMT);
+ SSA_NAME_DEF_STMT (loaded_val) = x;
+ }
+ else
+ {
+ x = build_gimple_modify_stmt (loaded_val, x);
+ force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ }
+ }
bsi_remove (&bsi, true);
bsi = bsi_last (store_bb);
gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
- /* For floating-point values, we'll need to view-convert them to integers
- so that we can perform the atomic compare and swap. Simplify the
- following code by always setting up the "i"ntegral variables. */
- if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
- {
- loadedi = loaded_val;
- storedi = stored_val;
- iaddr = addr;
- }
+ if (iaddr == addr)
+ storedi = stored_val;
else
- {
- loadedi = force_gimple_operand_bsi (&bsi,
- build1 (VIEW_CONVERT_EXPR, itype,
- loaded_val), true,
- NULL_TREE, true, BSI_SAME_STMT);
- storedi =
- force_gimple_operand_bsi (&bsi,
- build1 (VIEW_CONVERT_EXPR, itype,
- stored_val), true, NULL_TREE, true,
- BSI_SAME_STMT);
- iaddr = fold_convert (build_pointer_type (itype), addr);
- }
+ storedi =
+ force_gimple_operand_bsi (&bsi,
+ build1 (VIEW_CONVERT_EXPR, itype,
+ stored_val), true, NULL_TREE, true,
+ BSI_SAME_STMT);
/* Build the compare&swap statement. */
new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
fold_convert (itype, new_storedi),
true, NULL_TREE,
true, BSI_SAME_STMT);
- if (storedi == stored_val)
- new_stored = new_storedi;
- else
- new_stored = force_gimple_operand_bsi (&bsi,
- build1 (VIEW_CONVERT_EXPR, type,
- new_storedi), true,
- NULL_TREE, true, BSI_SAME_STMT);
if (gimple_in_ssa_p (cfun))
old_vali = loadedi;
else
{
old_vali = create_tmp_var (itype, NULL);
+ if (gimple_in_ssa_p (cfun))
+ add_referenced_var (old_vali);
x = build_gimple_modify_stmt (old_vali, loadedi);
- bsi_insert_before (&bsi, x, BSI_SAME_STMT);
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
- x = build_gimple_modify_stmt (loaded_val, new_stored);
- bsi_insert_before (&bsi, x, BSI_SAME_STMT);
+ x = build_gimple_modify_stmt (loadedi, new_storedi);
+ force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
+ true, BSI_SAME_STMT);
}
/* Note that we always perform the comparison as an integer, even for
floating point. This allows the atomic operation to properly
succeed even with NaNs and -0.0. */
- x = build3 (COND_EXPR, void_type_node,
- build2 (NE_EXPR, boolean_type_node,
- new_storedi, old_vali), NULL_TREE, NULL_TREE);
+ x = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
+ x = build3 (COND_EXPR, void_type_node, x, NULL_TREE, NULL_TREE);
bsi_insert_before (&bsi, x, BSI_SAME_STMT);
/* Update cfg. */
e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
- /* Copy the new value to loaded_val (we already did that before the condition
+ /* Copy the new value to loadedi (we already did that before the condition
if we are not in SSA). */
if (gimple_in_ssa_p (cfun))
{
phi = phi_nodes (loop_header);
- SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_stored);
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
}
/* Remove OMP_ATOMIC_STORE. */
return 0;
}
-/* OMP expansion in SSA form. For testing purposes only. */
-
-static bool
-gate_expand_omp_ssa (void)
-{
- return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
-}
-
-struct tree_opt_pass pass_expand_omp_ssa =
-{
- "ompexpssa", /* name */
- gate_expand_omp_ssa, /* gate */
- execute_expand_omp, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- 0, /* tv_id */
- PROP_gimple_any, /* properties_required */
- PROP_gimple_lomp, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
-};
-
/* OMP expansion -- the default pass, run before creation of SSA form. */
static bool
gate_expand_omp (void)
{
- return ((!flag_openmp_ssa || !optimize)
- && flag_openmp != 0 && errorcount == 0);
+ return (flag_openmp != 0 && errorcount == 0);
}
-struct tree_opt_pass pass_expand_omp =
+struct gimple_opt_pass pass_expand_omp =
{
+ {
+ GIMPLE_PASS,
"ompexp", /* name */
gate_expand_omp, /* gate */
execute_expand_omp, /* execute */
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func /* todo_flags_finish */
+ }
};
\f
/* Routines to lower OpenMP directives into OMP-GIMPLE. */
pop_gimplify_context (NULL_TREE);
}
-
-/* Pass *TP back through the gimplifier within the context determined by WI.
- This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
- flags on ADDR_EXPR. */
-
-static void
-lower_regimplify (tree *tp, struct walk_stmt_info *wi)
-{
- enum gimplify_status gs;
- tree pre = NULL;
-
- if (wi->is_lhs)
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
- else if (wi->val_only)
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
- else
- gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
- gcc_assert (gs == GS_ALL_DONE);
-
- if (pre)
- tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
-}
-
-/* Copy EXP into a temporary. Insert the initialization statement before TSI. */
+/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
+ regimplified. */
static tree
-init_tmp_var (tree exp, tree_stmt_iterator *tsi)
+lower_omp_2 (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
- tree t, stmt;
-
- t = create_tmp_var (TREE_TYPE (exp), NULL);
- DECL_GIMPLE_REG_P (t) = 1;
- stmt = build_gimple_modify_stmt (t, exp);
- SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
- tsi_link_before (tsi, stmt, TSI_SAME_STMT);
-
- return t;
-}
-
-/* Similarly, but copy from the temporary and insert the statement
- after the iterator. */
+ tree t = *tp;
-static tree
-save_tmp_var (tree exp, tree_stmt_iterator *tsi)
-{
- tree t, stmt;
+ /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
+ if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
+ return t;
- t = create_tmp_var (TREE_TYPE (exp), NULL);
- DECL_GIMPLE_REG_P (t) = 1;
- stmt = build_gimple_modify_stmt (exp, t);
- SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
- tsi_link_after (tsi, stmt, TSI_SAME_STMT);
+ /* If a global variable has been privatized, TREE_CONSTANT on
+ ADDR_EXPR might be wrong. */
+ if (TREE_CODE (t) == ADDR_EXPR)
+ recompute_tree_invariant_for_addr_expr (t);
- return t;
+ *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
+ return NULL_TREE;
}
-/* Callback for walk_stmts. Lower the OpenMP directive pointed by TP. */
-
-static tree
-lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
+static void
+lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
{
- struct walk_stmt_info *wi = data;
- omp_context *ctx = wi->info;
tree t = *tp;
+ if (!t)
+ return;
+
+ if (EXPR_HAS_LOCATION (t))
+ input_location = EXPR_LOCATION (t);
+
/* If we have issued syntax errors, avoid doing any heavy lifting.
Just replace the OpenMP directives with a NOP to avoid
confusing RTL expansion. */
- if (errorcount && OMP_DIRECTIVE_P (*tp))
+ if (errorcount && OMP_DIRECTIVE_P (t))
{
*tp = build_empty_stmt ();
- return NULL_TREE;
+ return;
}
- *walk_subtrees = 0;
- switch (TREE_CODE (*tp))
+ switch (TREE_CODE (t))
{
+ case STATEMENT_LIST:
+ {
+ tree_stmt_iterator i;
+ for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
+ lower_omp_1 (tsi_stmt_ptr (i), ctx, &i);
+ }
+ break;
+
+ case COND_EXPR:
+ lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
+ lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
+ if (ctx
+ && walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
+ {
+ tree pre = NULL;
+ gimplify_expr (&COND_EXPR_COND (t), &pre, NULL,
+ is_gimple_condexpr, fb_rvalue);
+ if (pre)
+ {
+ if (tsi)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ else
+ {
+ append_to_statement_list (t, &pre);
+ *tp = pre;
+ }
+ }
+ }
+ break;
+ case CATCH_EXPR:
+ lower_omp_1 (&CATCH_BODY (t), ctx, NULL);
+ break;
+ case EH_FILTER_EXPR:
+ lower_omp_1 (&EH_FILTER_FAILURE (t), ctx, NULL);
+ break;
+ case TRY_CATCH_EXPR:
+ case TRY_FINALLY_EXPR:
+ lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
+ lower_omp_1 (&TREE_OPERAND (t, 1), ctx, NULL);
+ break;
+ case BIND_EXPR:
+ lower_omp_1 (&BIND_EXPR_BODY (t), ctx, NULL);
+ break;
+ case RETURN_EXPR:
+ lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
+ break;
+
case OMP_PARALLEL:
ctx = maybe_lookup_ctx (t);
lower_omp_parallel (tp, ctx);
break;
-
case OMP_FOR:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_for (tp, ctx);
break;
-
case OMP_SECTIONS:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_sections (tp, ctx);
break;
-
case OMP_SINGLE:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_single (tp, ctx);
break;
-
case OMP_MASTER:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_master (tp, ctx);
break;
-
case OMP_ORDERED:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_ordered (tp, ctx);
break;
-
case OMP_CRITICAL:
ctx = maybe_lookup_ctx (t);
gcc_assert (ctx);
lower_omp_critical (tp, ctx);
break;
- case VAR_DECL:
- if (ctx && DECL_HAS_VALUE_EXPR_P (t))
+ default:
+ if (ctx && walk_tree (tp, lower_omp_2, ctx, NULL))
{
- lower_regimplify (&t, wi);
- if (wi->val_only)
+ /* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
+ Handle that here. */
+ tree call = get_call_expr_in (t);
+ if (call
+ && CALL_EXPR_STATIC_CHAIN (call)
+ && walk_tree (&CALL_EXPR_STATIC_CHAIN (call), lower_omp_2,
+ ctx, NULL))
{
- if (wi->is_lhs)
- t = save_tmp_var (t, &wi->tsi);
- else
- t = init_tmp_var (t, &wi->tsi);
+ tree pre = NULL;
+ gimplify_expr (&CALL_EXPR_STATIC_CHAIN (call), &pre, NULL,
+ is_gimple_val, fb_rvalue);
+ if (pre)
+ {
+ if (tsi)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ else
+ {
+ append_to_statement_list (t, &pre);
+ lower_omp_1 (&pre, ctx, NULL);
+ *tp = pre;
+ return;
+ }
+ }
}
- *tp = t;
- }
- break;
- case ADDR_EXPR:
- if (ctx)
- lower_regimplify (tp, wi);
- break;
-
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- case REALPART_EXPR:
- case IMAGPART_EXPR:
- case COMPONENT_REF:
- case VIEW_CONVERT_EXPR:
- if (ctx)
- lower_regimplify (tp, wi);
- break;
-
- case INDIRECT_REF:
- if (ctx)
- {
- wi->is_lhs = false;
- wi->val_only = true;
- lower_regimplify (&TREE_OPERAND (t, 0), wi);
+ if (tsi == NULL)
+ gimplify_stmt (tp);
+ else
+ {
+ tree pre = NULL;
+ gimplify_expr (tp, &pre, NULL, is_gimple_stmt, fb_none);
+ if (pre)
+ tsi_link_before (tsi, pre, TSI_SAME_STMT);
+ }
}
break;
-
- default:
- if (!TYPE_P (t) && !DECL_P (t))
- *walk_subtrees = 1;
- break;
}
-
- return NULL_TREE;
}
static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
- struct walk_stmt_info wi;
-
- memset (&wi, 0, sizeof (wi));
- wi.callback = lower_omp_1;
- wi.info = ctx;
- wi.val_only = true;
- wi.want_locations = true;
-
- walk_stmts (&wi, stmt_p);
+ lower_omp_1 (stmt_p, ctx, NULL);
}
\f
/* Main entry point. */
return flag_openmp != 0;
}
-struct tree_opt_pass pass_lower_omp =
+struct gimple_opt_pass pass_lower_omp =
{
+ {
+ GIMPLE_PASS,
"omplower", /* name */
gate_lower_omp, /* gate */
execute_lower_omp, /* execute */
PROP_gimple_lomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
- 0 /* letter */
+ TODO_dump_func /* todo_flags_finish */
+ }
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.