#include "cfgloop.h"
-/* Lowering of OpenMP parallel and workshare constructs proceeds in two
+/* Lowering of OpenMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
struct omp_context *outer;
gimple stmt;
- /* Map variables to fields in a structure that allows communication
+ /* Map variables to fields in a structure that allows communication
between sending and receiving threads. */
splay_tree field_map;
tree record_type;
struct omp_for_data_loop *loop;
int i;
struct omp_for_data_loop dummy_loop;
+ location_t loc = gimple_location (for_stmt);
fd->for_stmt = for_stmt;
fd->pre = NULL;
else
loop = &dummy_loop;
-
+
loop->v = gimple_omp_for_index (for_stmt, i);
gcc_assert (SSA_VAR_P (loop->v));
gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
break;
case LE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
- loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
+ loop->n2 = fold_build2_loc (loc,
+ POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
loop->n2, size_one_node);
else
- loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
+ loop->n2 = fold_build2_loc (loc,
+ PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
build_int_cst (TREE_TYPE (loop->n2), 1));
loop->cond_code = LT_EXPR;
break;
case GE_EXPR:
if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
- loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
+ loop->n2 = fold_build2_loc (loc,
+ POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
loop->n2, size_int (-1));
else
- loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
+ loop->n2 = fold_build2_loc (loc,
+ MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
build_int_cst (TREE_TYPE (loop->n2), 1));
loop->cond_code = GT_EXPR;
break;
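/* In plain C terms, the normalization above turns inclusive bounds into
   exclusive ones so that only LT_EXPR/GT_EXPR survive; a sketch,
   assuming no overflow at the adjusted bound:

     for (i = n1; i <= n2; i += step)  ==>  for (i = n1; i < n2 + 1; i += step)
     for (i = n1; i >= n2; i += step)  ==>  for (i = n1; i > n2 - 1; i += step)

   For pointer iterators the same +1/-1 adjustment is built with
   POINTER_PLUS_EXPR and a size_int offset.  */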
break;
case MINUS_EXPR:
loop->step = TREE_OPERAND (t, 1);
- loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
+ loop->step = fold_build1_loc (loc,
+ NEGATE_EXPR, TREE_TYPE (loop->step),
loop->step);
break;
default:
tree n;
if (loop->cond_code == LT_EXPR)
- n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
+ n = fold_build2_loc (loc,
+ PLUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
else
n = loop->n1;
if (loop->cond_code == LT_EXPR)
{
n1 = loop->n1;
- n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
+ n2 = fold_build2_loc (loc,
+ PLUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
}
else
{
- n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
+ n1 = fold_build2_loc (loc,
+ MINUS_EXPR, TREE_TYPE (loop->v),
loop->n2, loop->step);
n2 = loop->n1;
}
itype
= lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, itype,
- fold_convert (itype, loop->step), t);
- t = fold_build2 (PLUS_EXPR, itype, t,
- fold_convert (itype, loop->n2));
- t = fold_build2 (MINUS_EXPR, itype, t,
- fold_convert (itype, loop->n1));
+ t = fold_build2_loc (loc,
+ PLUS_EXPR, itype,
+ fold_convert_loc (loc, itype, loop->step), t);
+ t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
+ fold_convert_loc (loc, itype, loop->n2));
+ t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
+ fold_convert_loc (loc, itype, loop->n1));
if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
- t = fold_build2 (TRUNC_DIV_EXPR, itype,
- fold_build1 (NEGATE_EXPR, itype, t),
- fold_build1 (NEGATE_EXPR, itype,
- fold_convert (itype,
- loop->step)));
+ t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
+ fold_build1_loc (loc, NEGATE_EXPR, itype, t),
+ fold_build1_loc (loc, NEGATE_EXPR, itype,
+ fold_convert_loc (loc, itype,
+ loop->step)));
else
- t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
- fold_convert (itype, loop->step));
- t = fold_convert (long_long_unsigned_type_node, t);
+ t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
+ fold_convert_loc (loc, itype, loop->step));
+ t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
if (count != NULL_TREE)
- count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
+ count = fold_build2_loc (loc,
+ MULT_EXPR, long_long_unsigned_type_node,
count, t);
else
count = t;
if (collapse_count && *collapse_count == NULL)
{
if (count)
- *collapse_count = fold_convert (iter_type, count);
+ *collapse_count = fold_convert_loc (loc, iter_type, count);
else
*collapse_count = create_tmp_var (iter_type, ".count");
}
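/* A standalone sketch of the trip-count formula built above, assuming a
   loop already normalized to LT_EXPR or GT_EXPR and no overflow in the
   chosen integer type:

     static unsigned long long
     trip_count (long long n1, long long n2, long long step, int lt)
     {
       long long t = step + (lt ? -1 : 1) + n2 - n1;
       return (unsigned long long) (t / step);
     }

     trip_count (0, 10, 3, 1) == 4    -- i = 0, 3, 6, 9
     trip_count (10, 0, -2, 0) == 5   -- i = 10, 8, 6, 4, 2

   The unsigned/GT_EXPR branch negates both operands first so the
   truncating division is performed on non-negative values.  */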
# BLOCK 2 (PAR_ENTRY_BB)
.omp_data_o.i = i;
#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
-
+
# BLOCK 3 (WS_ENTRY_BB)
.omp_data_i = &.omp_data_o;
D.1667 = .omp_data_i->i;
hack something up here, it is really not worth the aggravation. */
static bool
-workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
+workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
struct omp_for_data fd;
- gimple par_stmt, ws_stmt;
-
- par_stmt = last_stmt (par_entry_bb);
- ws_stmt = last_stmt (ws_entry_bb);
+ gimple ws_stmt = last_stmt (ws_entry_bb);
if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
return true;
get_ws_args_for (gimple ws_stmt)
{
tree t;
+ location_t loc = gimple_location (ws_stmt);
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
ws_args = NULL_TREE;
if (fd.chunk_size)
{
- t = fold_convert (long_integer_type_node, fd.chunk_size);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
ws_args = tree_cons (NULL, t, ws_args);
}
- t = fold_convert (long_integer_type_node, fd.loop.step);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
ws_args = tree_cons (NULL, t, ws_args);
- t = fold_convert (long_integer_type_node, fd.loop.n2);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
ws_args = tree_cons (NULL, t, ws_args);
- t = fold_convert (long_integer_type_node, fd.loop.n1);
+ t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
ws_args = tree_cons (NULL, t, ws_args);
return ws_args;
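/* The resulting list supplies the trailing arguments of the combined
   parallel+loop launch; for a static schedule the emitted call looks
   roughly like

     GOMP_parallel_loop_static_start (fn, data, num_threads,
				      n1, n2, step, chunk_size);

   with the exact libgomp entry point selected later via start_ix in
   expand_parallel_call.  */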
if (single_succ (par_entry_bb) == ws_entry_bb
&& single_succ (ws_exit_bb) == par_exit_bb
- && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
+ && workshare_safe_to_combine_p (ws_entry_bb)
&& (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
|| (last_and_only_stmt (ws_entry_bb)
&& last_and_only_stmt (par_exit_bb))))
fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
region->cont->index);
}
-
+
if (region->exit)
fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
region->exit->index);
ctx->cb.dst_node = ctx->cb.src_node;
ctx->cb.src_cfun = cfun;
ctx->cb.copy_decl = omp_copy_decl;
- ctx->cb.eh_region = -1;
+ ctx->cb.eh_lp_nr = 0;
ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
ctx->depth = 1;
}
decl = build_decl (gimple_location (ctx->stmt),
FUNCTION_DECL, name, type);
- decl = lang_hooks.decls.pushdecl (decl);
if (!task_copy)
ctx->cb.dst_fn = decl;
DECL_ARGUMENTS (decl) = t;
}
- /* Allocate memory for the function structure. The call to
+ /* Allocate memory for the function structure. The call to
allocate_struct_function clobbers CFUN, so we need to restore
it afterward. */
push_struct_function (decl);
omp_context *ctx;
tree name, t;
gimple stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
/* Ignore task directives with empty bodies. */
if (optimize > 0
fixup_child_record_type (ctx);
if (ctx->srecord_type)
layout_type (ctx->srecord_type);
- t = fold_convert (long_integer_type_node,
+ t = fold_convert_loc (loc, long_integer_type_node,
TYPE_SIZE_UNIT (ctx->record_type));
gimple_omp_task_set_arg_size (stmt, t);
t = build_int_cst (long_integer_type_node,
tree
omp_reduction_init (tree clause, tree type)
{
+ location_t loc = OMP_CLAUSE_LOCATION (clause);
switch (OMP_CLAUSE_REDUCTION_CODE (clause))
{
case PLUS_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_XOR_EXPR:
case NE_EXPR:
- return fold_convert (type, integer_zero_node);
+ return fold_convert_loc (loc, type, integer_zero_node);
case MULT_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case EQ_EXPR:
- return fold_convert (type, integer_one_node);
+ return fold_convert_loc (loc, type, integer_one_node);
case BIT_AND_EXPR:
- return fold_convert (type, integer_minus_one_node);
+ return fold_convert_loc (loc, type, integer_minus_one_node);
case MAX_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
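/* Each value above is the identity element of its reduction operator,
   so every thread's private copy starts out neutral; in plain C terms:

     +  |  ^  ||  !=   ==>  0    (x OP 0 == x)
     *  &&  ==         ==>  1    (x OP 1 == x)
     &                 ==>  ~0   (x & ~0 == x)

   For MAX/MIN the identity is the type's minimum/maximum value; the
   floating-point cases use -Inf/+Inf when infinities are honored.  */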
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
- the variable sized types are processed before we use them in the
+ the variable sized types are processed before we use them in the
variable sized operations. */
for (pass = 0; pass < 2; ++pass)
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
+ location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (c_kind)
{
gimple_seq_add_stmt (ilist, stmt);
- x = fold_convert (TREE_TYPE (ptr), tmp);
+ x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
gimplify_assign (ptr, x, ilist);
}
}
if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
- x = build_fold_addr_expr (x);
+ x = build_fold_addr_expr_loc (clause_loc, x);
}
else if (TREE_CONSTANT (x))
{
name);
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
- x = build_fold_addr_expr (x);
+ x = build_fold_addr_expr_loc (clause_loc, x);
}
else
{
- x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
+ x = build_call_expr_loc (clause_loc,
+ built_in_decls[BUILT_IN_ALLOCA], 1, x);
}
- x = fold_convert (TREE_TYPE (new_var), x);
+ x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
- new_var = build_fold_indirect_ref (new_var);
+ new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
}
else if (c_kind == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
- able to notice this and not store anything at all, but
+ able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
x = build_outer_var_ref (var, ctx);
if (is_reference (var))
- x = build_fold_addr_expr (x);
+ x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
for (c = clauses; c ;)
{
tree var, new_var;
+ location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
{
x = build_outer_var_ref (var, ctx);
if (is_reference (var))
- new_var = build_fold_indirect_ref (new_var);
+ new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
gimplify_and_add (x, stmt_list);
}
{
tree var, ref, new_var;
enum tree_code code;
+ location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
continue;
var = OMP_CLAUSE_DECL (c);
new_var = lookup_decl (var, ctx);
if (is_reference (var))
- new_var = build_fold_indirect_ref (new_var);
+ new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
ref = build_outer_var_ref (var, ctx);
code = OMP_CLAUSE_REDUCTION_CODE (c);
if (count == 1)
{
- tree addr = build_fold_addr_expr (ref);
+ tree addr = build_fold_addr_expr_loc (clause_loc, ref);
addr = save_expr (addr);
ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
- x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
+ x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
x = build2 (OMP_ATOMIC, void_type_node, addr, x);
gimplify_and_add (x, stmt_seqp);
return;
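/* I.e. a single scalar reduction merges with one atomic update rather
   than a critical section; conceptually each thread performs

     #pragma omp atomic
     shared = shared OP private_copy;

   which is exactly the OMP_ATOMIC node built above.  */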
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (is_reference (var))
- ref = build_fold_addr_expr (ref);
+ ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
{
tree var, ref, x;
bool by_ref;
+ location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
continue;
ref = build_sender_ref (var, ctx);
x = lookup_decl_in_outer_ctx (var, ctx);
- x = by_ref ? build_fold_addr_expr (x) : x;
+ x = by_ref ? build_fold_addr_expr_loc (clause_loc, x) : x;
gimplify_assign (ref, x, slist);
ref = build_receiver_ref (var, by_ref, ctx);
if (is_reference (var))
{
- ref = build_fold_indirect_ref (ref);
- var = build_fold_indirect_ref (var);
+ ref = build_fold_indirect_ref_loc (clause_loc, ref);
+ var = build_fold_indirect_ref_loc (clause_loc, var);
}
x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
gimplify_and_add (x, rlist);
{
tree val, ref, x, var;
bool by_ref, do_in = false, do_out = false;
+ location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (OMP_CLAUSE_CODE (c))
{
if (do_in)
{
ref = build_sender_ref (val, ctx);
- x = by_ref ? build_fold_addr_expr (var) : var;
+ x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
gimplify_assign (ref, x, ilist);
if (is_task_ctx (ctx))
DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
}
-/* Build the function calls to GOMP_parallel_start etc to actually
+/* Build the function calls to GOMP_parallel_start etc. to actually
generate the parallel operation. REGION is the parallel region
being expanded. BB is the block where to insert the code. WS_ARGS
will be set if this is a call to a combined parallel+workshare
gimple_stmt_iterator gsi;
gimple stmt;
int start_ix;
+ location_t clause_loc;
clauses = gimple_omp_parallel_clauses (entry_stmt);
c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
if (c)
- val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
+ {
+ val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
+ clause_loc = OMP_CLAUSE_LOCATION (c);
+ }
+ else
+ clause_loc = gimple_location (entry_stmt);
/* Ensure 'val' is of the correct type. */
- val = fold_convert (unsigned_type_node, val);
+ val = fold_convert_loc (clause_loc, unsigned_type_node, val);
/* If we found the clause 'if (cond)', build either
(cond != 0) or (cond ? val : 1u). */
cond = gimple_boolify (cond);
if (integer_zerop (val))
- val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
+ val = fold_build2_loc (clause_loc,
+ EQ_EXPR, unsigned_type_node, cond,
build_int_cst (TREE_TYPE (cond), 0));
else
{
{
gimple phi = create_phi_node (tmp_join, bb);
SSA_NAME_DEF_STMT (tmp_join) = phi;
- add_phi_arg (phi, tmp_then, e_then);
- add_phi_arg (phi, tmp_else, e_else);
+ add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
+ add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
}
val = tmp_join;
tree args = tree_cons (NULL, t2,
tree_cons (NULL, t1,
tree_cons (NULL, val, ws_args)));
- t = build_function_call_expr (built_in_decls[start_ix], args);
+ t = build_function_call_expr (UNKNOWN_LOCATION,
+ built_in_decls[start_ix], args);
}
else
t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
t = null_pointer_node;
else
t = build_fold_addr_expr (t);
- t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
+ t = build_call_expr_loc (gimple_location (entry_stmt),
+ gimple_omp_parallel_child_fn (entry_stmt), 1, t);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
- t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
+ t = build_call_expr_loc (gimple_location (entry_stmt),
+ built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
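/* The block therefore ends up with the canonical libgomp launch
   sequence (sketched; the encompassing thread runs the body too, hence
   the direct call between start and end):

     GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, num_threads);
     foo.omp_fn.0 (&.omp_data_o);
     GOMP_parallel_end ();
  */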
{
tree t, t1, t2, t3, flags, cond, c, clauses;
gimple_stmt_iterator gsi;
+ location_t loc = gimple_location (entry_stmt);
clauses = gimple_omp_task_clauses (entry_stmt);
if (t == NULL)
t2 = null_pointer_node;
else
- t2 = build_fold_addr_expr (t);
- t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
+ t2 = build_fold_addr_expr_loc (loc, t);
+ t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
t = gimple_omp_task_copy_fn (entry_stmt);
if (t == NULL)
t3 = null_pointer_node;
else
- t3 = build_fold_addr_expr (t);
+ t3 = build_fold_addr_expr_loc (loc, t);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
- gimple f, t;
+ gimple g;
+ tree decl;
if (!flag_exceptions)
return body;
if (lang_protect_cleanup_actions)
- t = lang_protect_cleanup_actions ();
+ decl = lang_protect_cleanup_actions ();
else
- t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
-
- f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
- gimple_eh_filter_set_must_not_throw (f, true);
+ decl = built_in_decls[BUILT_IN_TRAP];
- t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
+ g = gimple_build_eh_must_not_throw (decl);
+ g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
GIMPLE_TRY_CATCH);
- return gimple_seq_alloc_with_stmt (t);
+ return gimple_seq_alloc_with_stmt (g);
}
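/* The resulting wrapper keeps exceptions from unwinding out of the OMP
   region; roughly:

     try
       {
	 BODY
       }
     catch
       {
	 <MUST_NOT_THROW region invoking the language's cleanup action,
	  e.g. std::terminate for C++, or __builtin_trap otherwise>
       }

   Before the EH rework this was expressed as an EH filter with its
   must_not_throw flag set; it is now a first-class
   GIMPLE_EH_MUST_NOT_THROW region.  */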
/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
continue;
if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
- || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
- != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
+ || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
+ TREE_TYPE (TREE_TYPE (built_in))))
continue;
gimple_call_set_fndecl (call, built_in);
}
/* Move the parallel region into CHILD_CFUN. */
-
+
if (gimple_in_ssa_p (cfun))
{
push_cfun (child_cfun);
current_function_decl = save_current;
pop_cfun ();
}
-
+
/* Emit a library call to launch the children threads. */
if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
expand_parallel_call (region, new_bb, entry_stmt, ws_args);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
-
+
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loop.v, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
-
+
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
-
+
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
-
+
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
-
+
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&si, true);
gsi_next (&psi), ++i)
{
gimple nphi;
+ source_location locus;
phi = gsi_stmt (psi);
t = gimple_phi_result (phi);
SSA_NAME_DEF_STMT (t) = nphi;
t = PHI_ARG_DEF_FROM_EDGE (phi, se);
+ locus = gimple_phi_arg_location_from_edge (phi, se);
+
/* A special case -- fd->loop.v is not yet computed in
iter_part_bb, we need to use v_extra instead. */
if (t == fd->loop.v)
t = v_extra;
- add_phi_arg (nphi, t, ene);
- add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
+ add_phi_arg (nphi, t, ene, locus);
+ locus = redirect_edge_var_map_location (vm);
+ add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
}
gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
redirect_edge_var_map_clear (re);
/* Make phi node for trip. */
phi = create_phi_node (trip_main, iter_part_bb);
SSA_NAME_DEF_STMT (trip_main) = phi;
- add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
- add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
+ add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
+ UNKNOWN_LOCATION);
+ add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
+ UNKNOWN_LOCATION);
}
set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
static void
expand_omp_sections (struct omp_region *region)
{
- tree t, u, vin = NULL, vmain, vnext, l1, l2;
+ tree t, u, vin = NULL, vmain, vnext, l2;
VEC (tree,heap) *label_vec;
unsigned len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
l2_bb = region->exit;
if (exit_reachable)
{
- if (single_pred (l2_bb) == l0_bb)
+ if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
l2 = gimple_block_label (l2_bb);
else
{
}
}
default_bb = create_empty_bb (l1_bb->prev_bb);
- l1 = gimple_block_label (l1_bb);
}
else
{
default_bb = create_empty_bb (l0_bb);
- l1 = NULL_TREE;
l2 = gimple_block_label (default_bb);
}
basic_block store_bb = single_succ (load_bb);
gimple_stmt_iterator gsi;
gimple stmt;
+ location_t loc;
/* We expect to find the following sequences:
-
+
load_bb:
GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
store_bb:
val = tmp OP something; (or: something OP tmp)
- GIMPLE_OMP_STORE (val)
+       GIMPLE_OMP_ATOMIC_STORE (val)
- ???FIXME: Allow a more flexible sequence.
+ ???FIXME: Allow a more flexible sequence.
Perhaps use data flow to pick the statements.
-
+
*/
gsi = gsi_after_labels (store_bb);
stmt = gsi_stmt (gsi);
+ loc = gimple_location (stmt);
if (!is_gimple_assign (stmt))
return false;
gsi_next (&gsi);
gsi = gsi_last_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
- call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
- call = fold_convert (void_type_node, call);
+ call = build_call_expr_loc (loc,
+ decl, 2, addr,
+ fold_convert_loc (loc, itype, rhs));
+ call = fold_convert_loc (loc, void_type_node, call);
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
}
/* Note that we always perform the comparison as an integer, even for
- floating point. This allows the atomic operation to properly
+ floating point. This allows the atomic operation to properly
succeed even with NaNs and -0.0. */
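/* A standalone sketch of the loop this expands to, for float, using the
   integer image for the compare-and-swap.  Bitwise equality is exact,
   so the retry test works even for NaN (where f != f as floats) and for
   -0.0 vs +0.0 (which compare equal as floats but differ in bits):

     static void
     atomic_add_float (float *addr, float val)
     {
       union { float f; unsigned int i; } old, new_;
       do
	 {
	   old.f = *addr;
	   new_.f = old.f + val;
	 }
       while (!__sync_bool_compare_and_swap ((unsigned int *) addr,
					     old.i, new_.i));
     }
  */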
stmt = gimple_build_cond_empty
(build2 (NE_EXPR, boolean_type_node,
references are within #pragma omp atomic directives. According to
   responses received from omp@openmp.org, this appears to be within spec.
Which makes sense, since that's how several other compilers handle
- this situation as well.
+ this situation as well.
LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
expanding. STORED_VAL is the operand of the matching
GIMPLE_OMP_ATOMIC_STORE.
- We replace
- GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
+ We replace
+ GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
loaded_val = *addr;
and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
- *addr = stored_val;
+ *addr = stored_val;
*/
static bool
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
- t = build_function_call_expr (t, 0);
+ t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
- t = build_function_call_expr (t, 0);
+ t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&si, true);
return true;
}
-/* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
- using expand_omp_atomic_fetch_op. If it failed, we try to
+/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand
+   using expand_omp_atomic_fetch_op.  If that fails, we try to
call expand_omp_atomic_pipeline, and if it fails too, the
ultimate fallback is wrapping the operation in a mutex
- (expand_omp_atomic_mutex). REGION is the atomic region built
- by build_omp_regions_1(). */
+ (expand_omp_atomic_mutex). REGION is the atomic region built
+ by build_omp_regions_1(). */
static void
expand_omp_atomic (struct omp_region *region)
return (flag_openmp != 0 && errorcount == 0);
}
-struct gimple_opt_pass pass_expand_omp =
+struct gimple_opt_pass pass_expand_omp =
{
{
GIMPLE_PASS,
gimple_seq_add_seq (&body, l);
gimple_omp_section_set_last (sec_start);
}
-
+
gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
}
gimple_seq_add_stmt (pre_p, call);
cond = gimple_build_cond (EQ_EXPR, lhs,
- fold_convert (TREE_TYPE (lhs), boolean_true_node),
+ fold_convert_loc (loc, TREE_TYPE (lhs),
+ boolean_true_node),
tlabel, flabel);
gimple_seq_add_stmt (pre_p, cond);
gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
l1 = create_artificial_label (loc);
l2 = create_artificial_label (loc);
- t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
- t = fold_convert (ptr_type, t);
+  t = build_call_expr_loc (loc,
+			   built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
+ t = fold_convert_loc (loc, ptr_type, t);
gimplify_assign (ctx->receiver_decl, t, pre_p);
t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
©in_seq, ctx);
- t = build_fold_addr_expr (ctx->sender_decl);
- t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
+ t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
+ t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
+ 1, t);
gimplify_and_add (t, pre_p);
t = build_and_jump (&l2);
bind_body = maybe_catch_exception (bind_body);
- t = gimple_build_omp_return
+ t = gimple_build_omp_return
(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT));
gimple_seq_add_stmt (&bind_body, t);
{
tree block, lab = NULL, x;
gimple stmt = gsi_stmt (*gsi_p), bind;
+ location_t loc = gimple_location (stmt);
gimple_seq tseq;
struct gimplify_ctx gctx;
bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
block);
- x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
+ x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
tseq = NULL;
tree block;
tree name, lock, unlock;
gimple stmt = gsi_stmt (*gsi_p), bind;
+ location_t loc = gimple_location (stmt);
gimple_seq tbody;
struct gimplify_ctx gctx;
decl = (tree) n->value;
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
- lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
+  lock = build_call_expr_loc (loc, lock, 1,
+			      build_fold_addr_expr_loc (loc, decl));
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
- unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
+ unlock = build_call_expr_loc (loc, unlock, 1,
+ build_fold_addr_expr_loc (loc, decl));
}
else
{
lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
- lock = build_call_expr (lock, 0);
+ lock = build_call_expr_loc (loc, lock, 0);
unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
- unlock = build_call_expr (unlock, 0);
+ unlock = build_call_expr_loc (loc, unlock, 0);
}
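/* So an unnamed critical section lowers to the parameterless pair,
   while a named one passes the address of a per-name pointer that
   libgomp initializes lazily; roughly:

     #pragma omp critical        ==>  GOMP_critical_start ();
				      ...body...
				      GOMP_critical_end ();

     #pragma omp critical (foo)  ==>  GOMP_critical_name_start (&.gomp_critical_user_foo);
				      ...body...
				      GOMP_critical_name_end (&.gomp_critical_user_foo);
  */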
push_gimplify_context (&gctx);
tree clauses, cond, vinit;
enum tree_code cond_code;
gimple_seq stmts;
-
+
cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
tree *rhs_p, block;
struct omp_for_data fd;
gimple stmt = gsi_stmt (*gsi_p), new_stmt;
- gimple_seq omp_for_body, body, dlist, ilist;
+ gimple_seq omp_for_body, body, dlist;
size_t i;
struct gimplify_ctx gctx;
}
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
- ilist = NULL;
dlist = NULL;
body = NULL;
lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
gsi_replace (gsi_p, new_stmt, true);
}
-/* Callback for walk_stmts. Check if the current statement only contains
+/* Callback for walk_stmts. Check if the current statement only contains
GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL. */
static tree
splay_tree_node n;
struct omp_taskcopy_context tcctx;
struct gimplify_ctx gctx;
+ location_t loc = gimple_location (task_stmt);
child_fn = gimple_omp_task_copy_fn (task_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
tcctx.cb.dst_node = tcctx.cb.src_node;
tcctx.cb.src_cfun = ctx->cb.src_cfun;
tcctx.cb.copy_decl = task_copyfn_copy_decl;
- tcctx.cb.eh_region = -1;
+ tcctx.cb.eh_lp_nr = 0;
tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
tcctx.cb.decl_map = pointer_map_create ();
tcctx.ctx = ctx;
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
sf = (tree) n->value;
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref (sarg);
+ src = build_fold_indirect_ref_loc (loc, sarg);
src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
append_to_statement_list (t, &list);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref (sarg);
+ src = build_fold_indirect_ref_loc (loc, sarg);
src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
- dst = build_fold_indirect_ref (arg);
+ dst = build_fold_indirect_ref_loc (loc, arg);
dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref (sarg);
+ src = build_fold_indirect_ref_loc (loc, sarg);
src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
if (use_pointer_for_field (decl, NULL) || is_reference (decl))
- src = build_fold_indirect_ref (src);
+ src = build_fold_indirect_ref_loc (loc, src);
}
else
src = decl;
- dst = build_fold_indirect_ref (arg);
+ dst = build_fold_indirect_ref_loc (loc, arg);
dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref (sarg);
+ src = build_fold_indirect_ref_loc (loc, sarg);
src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
if (use_pointer_for_field (decl, NULL))
- src = build_fold_indirect_ref (src);
+ src = build_fold_indirect_ref_loc (loc, src);
}
else
src = decl;
- dst = build_fold_indirect_ref (arg);
+ dst = build_fold_indirect_ref_loc (loc, arg);
dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
(splay_tree_key) TREE_OPERAND (ind, 0));
sf = (tree) n->value;
sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
- src = build_fold_indirect_ref (sarg);
+ src = build_fold_indirect_ref_loc (loc, sarg);
src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
- src = build_fold_indirect_ref (src);
- dst = build_fold_indirect_ref (arg);
+ src = build_fold_indirect_ref_loc (loc, src);
+ dst = build_fold_indirect_ref_loc (loc, arg);
dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
(splay_tree_key) TREE_OPERAND (ind, 0));
df = (tree) n->value;
df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
- ptr = build_fold_indirect_ref (arg);
+ ptr = build_fold_indirect_ref_loc (loc, arg);
ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
- build_fold_addr_expr (dst));
+ build_fold_addr_expr_loc (loc, dst));
append_to_statement_list (t, &list);
}
gimple par_bind, bind;
gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
struct gimplify_ctx gctx;
+ location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
if (ctx->record_type)
{
- t = build_fold_addr_expr (ctx->sender_decl);
+ t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
- t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
+ t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
return 0;
}
-struct gimple_opt_pass pass_lower_omp =
+struct gimple_opt_pass pass_lower_omp =
{
{
GIMPLE_PASS,
if (label_ctx == branch_ctx)
return false;
-
+
/*
Previously we kept track of the label's entire context in diagnose_sb_[12]
    so we could traverse it and issue a correct "exit" or "enter" error
    message, but that bookkeeping proved far too much work just for
    issuing exit/enter error messages.  If someone really misses the
distinct error message... patches welcome.
*/
-
+
#if 0
  /* Try to avoid confusing the user by producing an error message
with correct "exit" or "enter" verbiage. We prefer "exit"
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
-
+
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
wi->info = context;
break;
+ case GIMPLE_COND:
+ {
+ tree lab = gimple_cond_true_label (stmt);
+ if (lab)
+ {
+ n = splay_tree_lookup (all_labels,
+ (splay_tree_key) lab);
+ diagnose_sb_0 (gsi_p, context,
+ n ? (gimple) n->value : NULL);
+ }
+ lab = gimple_cond_false_label (stmt);
+ if (lab)
+ {
+ n = splay_tree_lookup (all_labels,
+ (splay_tree_key) lab);
+ diagnose_sb_0 (gsi_p, context,
+ n ? (gimple) n->value : NULL);
+ }
+ }
+ break;
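+      /* This catches, e.g., a conditional branch into a structured
+	 block after gimplification; a minimal example this pass must
+	 reject:
+
+	   if (p)
+	     goto inside;
+	   #pragma omp parallel
+	     {
+	     inside: ;
+	     }
+
+	 The gimplifier lowers the if to a GIMPLE_COND whose true label
+	 targets a different OMP context, yielding the usual "invalid
+	 entry to OpenMP structured block" diagnostic.  */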
+
case GIMPLE_GOTO:
{
tree lab = gimple_goto_dest (stmt);
return NULL_TREE;
}
-void
-diagnose_omp_structured_block_errors (tree fndecl)
+static unsigned int
+diagnose_omp_structured_block_errors (void)
{
- tree save_current = current_function_decl;
struct walk_stmt_info wi;
- struct function *old_cfun = cfun;
- gimple_seq body = gimple_body (fndecl);
-
- current_function_decl = fndecl;
- set_cfun (DECL_STRUCT_FUNCTION (fndecl));
+ gimple_seq body = gimple_body (current_function_decl);
all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
splay_tree_delete (all_labels);
all_labels = NULL;
- set_cfun (old_cfun);
- current_function_decl = save_current;
+ return 0;
}
+static bool
+gate_diagnose_omp_blocks (void)
+{
+ return flag_openmp != 0;
+}
+
+struct gimple_opt_pass pass_diagnose_omp_blocks =
+{
+ {
+ GIMPLE_PASS,
+ "*diagnose_omp_blocks", /* name */
+ gate_diagnose_omp_blocks, /* gate */
+ diagnose_omp_structured_block_errors, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ }
+};
+
#include "gt-omp-low.h"